From d0f5b893a8f3448882f18cca7cd2fec02c708874 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 29 Aug 2016 11:34:42 -0700 Subject: [PATCH 01/24] HIVE-14635 : establish a separate path for FSOP to write into final path (Sergey Shelukhin) --- .../api/hive_metastoreConstants.java | 30 +- .../hadoop/hive/ql/exec/FileSinkOperator.java | 127 ++++--- .../hadoop/hive/ql/exec/JoinOperator.java | 1 + .../apache/hadoop/hive/ql/exec/MoveTask.java | 126 +++---- .../apache/hadoop/hive/ql/exec/Utilities.java | 17 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 98 +++++- .../hive/ql/optimizer/GenMapRedUtils.java | 4 + .../optimizer/unionproc/UnionProcFactory.java | 3 + .../hive/ql/parse/GenTezProcContext.java | 2 +- .../hadoop/hive/ql/parse/GenTezUtils.java | 4 + .../hive/ql/parse/SemanticAnalyzer.java | 311 ++++++++++-------- .../plan/ConditionalResolverMergeFiles.java | 5 + .../hadoop/hive/ql/plan/FileSinkDesc.java | 11 +- .../hadoop/hive/ql/plan/LoadFileDesc.java | 2 + .../hadoop/hive/ql/plan/LoadTableDesc.java | 24 +- .../apache/hadoop/hive/ql/plan/MoveWork.java | 1 + .../hive/ql/exec/TestFileSinkOperator.java | 2 +- ql/src/test/queries/clientpositive/mm_all.q | 63 ++++ .../test/queries/clientpositive/mm_current.q | 11 + .../clientpositive/llap/mm_current.q.out | 21 ++ 20 files changed, 567 insertions(+), 296 deletions(-) create mode 100644 ql/src/test/queries/clientpositive/mm_all.q create mode 100644 ql/src/test/queries/clientpositive/mm_current.q create mode 100644 ql/src/test/results/clientpositive/llap/mm_current.q.out diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index 8de8896bff4d..6a5f550242d4 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -6,34 +6,7 @@ */ package org.apache.hadoop.hive.metastore.api; -import org.apache.thrift.scheme.IScheme; -import org.apache.thrift.scheme.SchemeFactory; -import org.apache.thrift.scheme.StandardScheme; - -import org.apache.thrift.scheme.TupleScheme; -import org.apache.thrift.protocol.TTupleProtocol; -import org.apache.thrift.protocol.TProtocolException; -import org.apache.thrift.EncodingUtils; -import org.apache.thrift.TException; -import org.apache.thrift.async.AsyncMethodCallback; -import org.apache.thrift.server.AbstractNonblockingServer.*; -import java.util.List; -import java.util.ArrayList; -import java.util.Map; -import java.util.HashMap; -import java.util.EnumMap; -import java.util.Set; -import java.util.HashSet; -import java.util.EnumSet; -import java.util.Collections; -import java.util.BitSet; -import java.nio.ByteBuffer; -import java.util.Arrays; -import javax.annotation.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) + public class hive_metastoreConstants { public static final String DDL_TIME = "transient_lastDdlTime"; @@ -84,4 +57,5 @@ public class hive_metastoreConstants { public static final String TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"; + public static final String TABLE_IS_MM = "hivecommit"; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index b0c3d3f16232..755120f11cd6 
100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -143,8 +143,8 @@ public static interface RecordWriter { } public class FSPaths implements Cloneable { - Path tmpPath; - Path taskOutputTempPath; + private Path tmpPath; + private Path taskOutputTempPath; Path[] outPaths; Path[] finalPaths; RecordWriter[] outWriters; @@ -152,10 +152,21 @@ public class FSPaths implements Cloneable { Stat stat; int acidLastBucket = -1; int acidFileOffset = -1; + private boolean isMmTable; + + public FSPaths(Path specPath, boolean isMmTable) { + this.isMmTable = isMmTable; + if (!isMmTable) { + tmpPath = Utilities.toTempPath(specPath); + taskOutputTempPath = Utilities.toTaskTempPath(specPath); + } else { + tmpPath = specPath; + taskOutputTempPath = null; // Should not be used. + } + Utilities.LOG14535.info("new FSPaths for " + numFiles + " files, dynParts = " + bDynParts + + ": tmpPath " + tmpPath + ", task path " + taskOutputTempPath + + " (spec path " + specPath + ")", new Exception()); - public FSPaths(Path specPath) { - tmpPath = Utilities.toTempPath(specPath); - taskOutputTempPath = Utilities.toTaskTempPath(specPath); outPaths = new Path[numFiles]; finalPaths = new Path[numFiles]; outWriters = new RecordWriter[numFiles]; @@ -207,10 +218,12 @@ public void closeWriters(boolean abort) throws HiveException { } private void commit(FileSystem fs) throws HiveException { + if (isMmTable) return; // TODO#: need to propagate to MoveTask instead for (int idx = 0; idx < outPaths.length; ++idx) { try { if ((bDynParts || isSkewedStoredAsSubDirectories) && !fs.exists(finalPaths[idx].getParent())) { + Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent()); fs.mkdirs(finalPaths[idx].getParent()); } boolean needToRename = true; @@ -229,6 +242,7 @@ private void commit(FileSystem fs) throws HiveException { needToRename = false; } } + Utilities.LOG14535.info("commit potentially moving " + outPaths[idx] + " to " + finalPaths[idx]); if (needToRename && outPaths[idx] != null && !fs.rename(outPaths[idx], finalPaths[idx])) { throw new HiveException("Unable to rename output from: " + outPaths[idx] + " to: " + finalPaths[idx]); @@ -260,6 +274,54 @@ public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws Hi public Stat getStat() { return stat; } + + public void configureDynPartPath(String dirName, String childSpecPathDynLinkedPartitions) { + dirName = (childSpecPathDynLinkedPartitions == null) ? dirName : + dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions; + tmpPath = new Path(tmpPath, dirName); + if (taskOutputTempPath != null) { + taskOutputTempPath = new Path(taskOutputTempPath, dirName); + } + } + + public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeTable, + boolean isSkewedStoredAsSubDirectories) { + if (isNativeTable) { + String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat); + if (!isMmTable) { + if (!bDynParts && !isSkewedStoredAsSubDirectories) { + finalPaths[filesIdx] = getFinalPath(taskId, parent, extension); + } else { + finalPaths[filesIdx] = getFinalPath(taskId, tmpPath, extension); + } + outPaths[filesIdx] = getTaskOutPath(taskId); + } else { + if (!bDynParts && !isSkewedStoredAsSubDirectories) { + finalPaths[filesIdx] = getFinalPath(taskId, specPath, extension); + } else { + // TODO# wrong! 
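+            // Both branches are currently identical; the DP/skewed case should target
+            // the partition subdirectory (tmpPath) rather than specPath itself.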
+ finalPaths[filesIdx] = getFinalPath(taskId, specPath, extension); + } + outPaths[filesIdx] = finalPaths[filesIdx]; + } + if (isInfoEnabled) { + LOG.info("Final Path: FS " + finalPaths[filesIdx]); + if (isInfoEnabled && !isMmTable) { + LOG.info("Writing to temp file: FS " + outPaths[filesIdx]); + } + } + } else { + finalPaths[filesIdx] = outPaths[filesIdx] = specPath; + } + } + + public Path getTmpPath() { + return tmpPath; + } + + public Path getTaskOutputTempPath() { + return taskOutputTempPath; + } } // class FSPaths private static final long serialVersionUID = 1L; @@ -297,6 +359,7 @@ public Stat getStat() { protected boolean filesCreated = false; private void initializeSpecPath() { + // TODO# special case #N // For a query of the type: // insert overwrite table T1 // select * from (subq1 union all subq2)u; @@ -397,7 +460,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { } if (!bDynParts) { - fsp = new FSPaths(specPath); + fsp = new FSPaths(specPath, conf.isMmTable()); // Create all the files - this is required because empty files need to be created for // empty buckets @@ -411,6 +474,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { .getVar(hconf, HIVE_TEMPORARY_TABLE_STORAGE)); if (isTemporary && fsp != null && tmpStorage != StoragePolicyValue.DEFAULT) { + assert !conf.isMmTable(); // Not supported for temp tables. final Path outputPath = fsp.taskOutputTempPath; StoragePolicyShim shim = ShimLoader.getHadoopShims() .getStoragePolicyShim(fs); @@ -557,7 +621,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { assert filesIdx == numFiles; // in recent hadoop versions, use deleteOnExit to clean tmp files. - if (isNativeTable && fs != null && fsp != null) { + if (isNativeTable && fs != null && fsp != null && !conf.isMmTable()) { autoDelete = fs.deleteOnExit(fsp.outPaths[0]); } } catch (Exception e) { @@ -571,34 +635,16 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) throws HiveException { try { - if (isNativeTable) { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null); - if (isInfoEnabled) { - LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]); - } - fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId); - if (isInfoEnabled) { - LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]); - } - } else { - fsp.finalPaths[filesIdx] = fsp.outPaths[filesIdx] = specPath; - } - // The reason to keep these instead of using - // OutputFormat.getRecordWriter() is that - // getRecordWriter does not give us enough control over the file name that - // we create. 
- String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat); - if (!bDynParts && !this.isSkewedStoredAsSubDirectories) { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, parent, extension); - } else { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension); - } + fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories); + Utilities.LOG14535.info("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx] + + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path " + + fsp.getTmpPath() + ", task " + taskId + ")", new Exception()); if (isInfoEnabled) { LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); } - if (isNativeTable) { + if (isNativeTable && !conf.isMmTable()) { // in recent hadoop versions, use deleteOnExit to clean tmp files. autoDelete = fs.deleteOnExit(fsp.outPaths[filesIdx]); } @@ -828,6 +874,7 @@ private int findWriterOffset(Object row) throws HiveException { protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveException { FSPaths fsp2 = valToPaths.get(lbDirName); if (fsp2 == null) { + Utilities.LOG14535.info("lookupListBucketingPaths for " + lbDirName); fsp2 = createNewPaths(lbDirName); } return fsp2; @@ -841,18 +888,10 @@ protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveExceptio * @throws HiveException */ private FSPaths createNewPaths(String dirName) throws HiveException { - FSPaths fsp2 = new FSPaths(specPath); - if (childSpecPathDynLinkedPartitions != null) { - fsp2.tmpPath = new Path(fsp2.tmpPath, - dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions); - fsp2.taskOutputTempPath = - new Path(fsp2.taskOutputTempPath, - dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions); - } else { - fsp2.tmpPath = new Path(fsp2.tmpPath, dirName); - fsp2.taskOutputTempPath = - new Path(fsp2.taskOutputTempPath, dirName); - } + FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); // TODO# this will break + fsp2.configureDynPartPath(dirName, childSpecPathDynLinkedPartitions); + Utilities.LOG14535.info("creating new paths for " + dirName + ", childSpec " + childSpecPathDynLinkedPartitions + + ": tmpPath " + fsp2.getTmpPath() + ", task path " + fsp2.getTaskOutputTempPath()); if(!conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { createBucketFiles(fsp2); valToPaths.put(dirName, fsp2); @@ -1082,7 +1121,7 @@ public void closeOp(boolean abort) throws HiveException { // Hadoop always call close() even if an Exception was thrown in map() or // reduce(). for (FSPaths fsp : valToPaths.values()) { - fsp.abortWriters(fs, abort, !autoDelete && isNativeTable); + fsp.abortWriters(fs, abort, !autoDelete && isNativeTable && !conf.isMmTable()); } } fsp = prevFsp = null; @@ -1193,7 +1232,7 @@ private void publishStats() throws HiveException { for (Map.Entry entry : valToPaths.entrySet()) { String fspKey = entry.getKey(); // DP/LB FSPaths fspValue = entry.getValue(); - + // TODO# useful code as reference, as it takes apart the crazy paths // for bucketed tables, hive.optimize.sort.dynamic.partition optimization // adds the taskId to the fspKey. 
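+      // ...so the taskId suffix is split back off before fspKey is interpreted
+      // as DP/LB directories below.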
if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index 08cc4b4c6ec6..8f319ba57477 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -233,6 +233,7 @@ private void mvFileToFinalPath(Path specPath, Configuration hconf, // point, updates from speculative tasks still writing to tmpPath // will not appear in finalPath. log.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath); + Utilities.LOG14535.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath + "(spec " + specPath + ")"); Utilities.rename(fs, tmpPath, intermediatePath); // Step2: remove any tmp file or double-committed output files Utilities.removeTempOrDuplicateFiles(fs, intermediatePath); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 546919bd3c9b..14a84cde0ee8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -252,6 +252,7 @@ public int execute(DriverContext driverContext) { if (lfd != null) { Path targetPath = lfd.getTargetDir(); Path sourcePath = lfd.getSourcePath(); + Utilities.LOG14535.info("MoveTask moving LFD " + sourcePath + " to " + targetPath); moveFile(sourcePath, targetPath, lfd.getIsDfsDir()); } @@ -268,6 +269,7 @@ public int execute(DriverContext driverContext) { if (!fs.exists(destPath.getParent())) { fs.mkdirs(destPath.getParent()); } + Utilities.LOG14535.info("MoveTask moving LMFD " + srcPath + " to " + destPath); moveFile(srcPath, destPath, isDfsDir); i++; } @@ -288,71 +290,17 @@ public int execute(DriverContext driverContext) { mesg.append(')'); } String mesg_detail = " from " + tbd.getSourcePath(); + Utilities.LOG14535.info("" + mesg.toString() + " " + mesg_detail); console.printInfo(mesg.toString(), mesg_detail); Table table = db.getTable(tbd.getTable().getTableName()); - if (work.getCheckFileFormat()) { - // Get all files from the src directory - FileStatus[] dirs; - ArrayList files; - FileSystem srcFs; // source filesystem - try { - srcFs = tbd.getSourcePath().getFileSystem(conf); - dirs = srcFs.globStatus(tbd.getSourcePath()); - files = new ArrayList(); - for (int i = 0; (dirs != null && i < dirs.length); i++) { - files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER))); - // We only check one file, so exit the loop when we have at least - // one. - if (files.size() > 0) { - break; - } - } - } catch (IOException e) { - throw new HiveException( - "addFiles: filesystem error in check phase", e); - } - - // handle file format check for table level - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { - boolean flag = true; - // work.checkFileFormat is set to true only for Load Task, so assumption here is - // dynamic partition context is null - if (tbd.getDPCtx() == null) { - if (tbd.getPartitionSpec() == null || tbd.getPartitionSpec().isEmpty()) { - // Check if the file format of the file matches that of the table. 
- flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); - } else { - // Check if the file format of the file matches that of the partition - Partition oldPart = db.getPartition(table, tbd.getPartitionSpec(), false); - if (oldPart == null) { - // this means we have just created a table and are specifying partition in the - // load statement (without pre-creating the partition), in which case lets use - // table input format class. inheritTableSpecs defaults to true so when a new - // partition is created later it will automatically inherit input format - // from table object - flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); - } else { - flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, oldPart.getInputFormatClass(), files); - } - } - if (!flag) { - throw new HiveException( - "Wrong file format. Please check the file's format."); - } - } else { - LOG.warn("Skipping file format check as dpCtx is not null"); - } - } - } + checkFileFormats(db, tbd, table); // Create a data container DataContainer dc = null; if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); + Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable()); db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), work.isSrcLocal(), isSkewedStoredAsDirs(tbd), work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, @@ -495,10 +443,11 @@ public int execute(DriverContext driverContext) { List partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec()); db.validatePartitionNameCharacters(partVals); + Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + " into " + tbd.getTable()); db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getPartitionSpec(), tbd.getReplace(), tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask()); + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask(), tbd.isMmTable()); Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); if (bucketCols != null || sortCols != null) { @@ -547,6 +496,67 @@ public int execute(DriverContext driverContext) { } } + private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) + throws HiveException { + if (work.getCheckFileFormat()) { + // Get all files from the src directory + FileStatus[] dirs; + ArrayList files; + FileSystem srcFs; // source filesystem + try { + srcFs = tbd.getSourcePath().getFileSystem(conf); + dirs = srcFs.globStatus(tbd.getSourcePath()); + files = new ArrayList(); + for (int i = 0; (dirs != null && i < dirs.length); i++) { + files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER))); + // We only check one file, so exit the loop when we have at least + // one. 
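+          // A single sample file is treated as representative of the whole load.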
+ if (files.size() > 0) { + break; + } + } + } catch (IOException e) { + throw new HiveException( + "addFiles: filesystem error in check phase", e); + } + + // handle file format check for table level + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + boolean flag = true; + // work.checkFileFormat is set to true only for Load Task, so assumption here is + // dynamic partition context is null + if (tbd.getDPCtx() == null) { + if (tbd.getPartitionSpec() == null || tbd.getPartitionSpec().isEmpty()) { + // Check if the file format of the file matches that of the table. + flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); + } else { + // Check if the file format of the file matches that of the partition + Partition oldPart = db.getPartition(table, tbd.getPartitionSpec(), false); + if (oldPart == null) { + // this means we have just created a table and are specifying partition in the + // load statement (without pre-creating the partition), in which case lets use + // table input format class. inheritTableSpecs defaults to true so when a new + // partition is created later it will automatically inherit input format + // from table object + flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); + } else { + flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, oldPart.getInputFormatClass(), files); + } + } + if (!flag) { + throw new HiveException( + "Wrong file format. Please check the file's format."); + } + } else { + LOG.warn("Skipping file format check as dpCtx is not null"); + } + } + } + } + private boolean isSkewedStoredAsDirs(LoadTableDesc tbd) { return (tbd.getLbCtx() == null) ? false : tbd.getLbCtx() .isSkewedStoredAsDir(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index a542dc4a364e..5bc04e19f028 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -208,6 +208,9 @@ @SuppressWarnings("nls") public final class Utilities { + // TODO: remove when merging + public static final Logger LOG14535 = LoggerFactory.getLogger("Log14535"); + /** * The object in the reducer are composed of these top level fields. */ @@ -1405,6 +1408,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, Path tmpPath = Utilities.toTempPath(specPath); Path taskTmpPath = Utilities.toTaskTempPath(specPath); if (success) { + // TODO# specPath instead of tmpPath FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse( tmpPath, ((dpCtx == null) ? 1 : dpCtx.getNumDPCols()), fs); if(statuses != null && statuses.length > 0) { @@ -1414,17 +1418,21 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, if (emptyBuckets.size() > 0) { createEmptyBuckets(hconf, emptyBuckets, conf, reporter); } - // move to the file destination - log.info("Moving tmp dir: " + tmpPath + " to: " + specPath); + Utilities.LOG14535.info("Moving tmp dir: " + tmpPath + " to: " + specPath); Utilities.renameOrMoveFiles(fs, tmpPath, specPath); } + List paths = new ArrayList<>(); + // TODO#: HERE listFilesToCommit(specPath, fs, paths); } else { + Utilities.LOG14535.info("deleting tmpPath " + tmpPath); fs.delete(tmpPath, true); } + Utilities.LOG14535.info("deleting taskTmpPath " + taskTmpPath); fs.delete(taskTmpPath, true); } + /** * Check the existence of buckets according to bucket specification. 
Create empty buckets if * needed. @@ -1465,6 +1473,7 @@ private static void createEmptyBuckets(Configuration hconf, List paths, } for (Path path : paths) { + Utilities.LOG14535.info("creating empty bucket for " + path); RecordWriter writer = HiveFileFormatUtils.getRecordWriter( jc, hiveOutputFormat, outputClass, isCompressed, tableInfo.getProperties(), path, reporter); @@ -1576,15 +1585,19 @@ public static HashMap removeTempOrDuplicateFiles(FileStatus[ for (FileStatus one : items) { if (isTempPath(one)) { + Utilities.LOG14535.info("removeTempOrDuplicateFiles deleting " + one.getPath(), new Exception()); if (!fs.delete(one.getPath(), true)) { throw new IOException("Unable to delete tmp file: " + one.getPath()); } } else { String taskId = getPrefixedTaskIdFromFilename(one.getPath().getName()); + Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId, new Exception()); + FileStatus otherFile = taskIdToFile.get(taskId); if (otherFile == null) { taskIdToFile.put(taskId, one); } else { + // TODO# file choice! // Compare the file sizes of all the attempt files for the same task, the largest win // any attempt files could contain partial results (due to task failures or // speculative runs), but the largest should be the correct one since the result diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 5f53aef44212..7d8c961a53e8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1472,10 +1472,12 @@ public Database getDatabaseCurrent() throws HiveException { public void loadPartition(Path loadPath, String tableName, Map partSpec, boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException { + boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, boolean isMmTable) + throws HiveException { Table tbl = getTable(tableName); + // TODO# dbl check if table is still mm for consistency loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs, - isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask); + isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, isMmTable); } /** @@ -1499,10 +1501,10 @@ public void loadPartition(Path loadPath, String tableName, * If the source directory is LOCAL * @param isAcid true if this is an ACID operation */ - public Partition loadPartition(Path loadPath, Table tbl, - Map partSpec, boolean replace, - boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException { + public Partition loadPartition(Path loadPath, Table tbl, Map partSpec, + boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, + boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, boolean isMmTable) + throws HiveException { Path tblDataLocationPath = tbl.getDataLocation(); try { @@ -1540,17 +1542,25 @@ public Partition loadPartition(Path loadPath, Table tbl, } else { newPartPath = oldPartPath; } - List newFiles = null; - if (replace || (oldPart == null && !isAcid)) { - replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), - isSrcLocal); - } else { - if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) { - newFiles = Collections.synchronizedList(new ArrayList()); + List 
newFiles = null, mmFiles = null; + if (isMmTable) { + mmFiles = handleMicromanagedPartition( + loadPath, tbl, replace, oldPart, newPartPath, isAcid); + if (areEventsForDmlNeeded(tbl, oldPart)) { + newFiles = mmFiles; } + } else { + if (replace || (oldPart == null && !isAcid)) { + replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), + isSrcLocal); + } else { + if (areEventsForDmlNeeded(tbl, oldPart)) { + newFiles = Collections.synchronizedList(new ArrayList()); + } - FileSystem fs = tbl.getDataLocation().getFileSystem(conf); - Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles); + FileSystem fs = tbl.getDataLocation().getFileSystem(conf); + Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles); + } } Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath); alterPartitionSpecInMemory(tbl, partSpec, newTPart.getTPartition(), inheritTableSpecs, newPartPath.toString()); @@ -1621,6 +1631,58 @@ public Partition loadPartition(Path loadPath, Table tbl, } } + + private boolean areEventsForDmlNeeded(Table tbl, Partition oldPart) { + return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null; + } + + + private List handleMicromanagedPartition(Path loadPath, Table tbl, boolean replace, + Partition oldPart, Path newPartPath, boolean isAcid) throws HiveException { + Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath); + if (replace) { + // TODO#: would need a list of new files to support. Then, old ones only would need + // to be removed from MS (and FS). Also, per-partition IOW is problematic for + // the prefix case. + throw new HiveException("Replace and MM are not supported"); + } + if (isAcid) { + // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move. + throw new HiveException("ACID and MM are not supported"); + } + List newFiles = new ArrayList(); + FileStatus[] srcs; + FileSystem srcFs; + try { + srcFs = loadPath.getFileSystem(conf); + srcs = srcFs.globStatus(loadPath); + } catch (IOException e) { + LOG.error("Error listing files", e); + throw new HiveException(e); + } + if (srcs == null) { + LOG.info("No sources specified: " + loadPath); + return newFiles; + } + + // TODO: just like the move path, we only do one level of recursion. 
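+    // Top-level files are added as-is; for a directory, only its immediate
+    // non-hidden children are collected.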
+ for (FileStatus src : srcs) { + if (src.isDirectory()) { + try { + for (FileStatus srcFile : + srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER)) { + newFiles.add(srcFile.getPath()); + } + } catch (IOException e) { + throw new HiveException(e); + } + } else { + newFiles.add(src.getPath()); + } + } + return newFiles; + } + private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl, Partition newTPart) throws MetaException, TException { EnvironmentContext environmentContext = null; @@ -1813,9 +1875,10 @@ public Void call() throws Exception { LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec); // load the partition + Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName()); Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true, listBucketingEnabled, - false, isAcid, hasFollowingStatsTask); + false, isAcid, hasFollowingStatsTask, false); // TODO# here partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { @@ -2803,6 +2866,9 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, destPath = mvFile(conf, srcP, destPath, isSrcLocal, srcFs, destFs, name, filetype); } + if (inheritPerms) { + HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false); + } if (null != newFiles) { newFiles.add(destPath); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index cea99e1423c0..4e44d490ec16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1268,6 +1268,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // Create a FileSink operator TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); + // TODO# special case #N - merge FS is created here FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild( @@ -1806,6 +1807,7 @@ public static Path createMoveTask(Task currTask, boolean // Create the required temporary file in the HDFS location if the destination // path of the FileSinkOperator table is a blobstore path. 
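+    // getTempDirForPath below is what redirects the file sink to a staging
+    // directory; per the TODO, the MM write-to-final-path case likely has to
+    // bypass this redirection.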
+ // TODO# HERE Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); // Change all the linked file sink descriptors @@ -1813,9 +1815,11 @@ public static Path createMoveTask(Task currTask, boolean for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { fsConf.setParentDir(tmpDir); fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); + Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; new parent " + tmpDir + ", dest was " + fileSinkDesc.getDestPath()); } } else { fileSinkDesc.setDirName(tmpDir); + Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + tmpDir + "; dest was " + fileSinkDesc.getDestPath()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java index 2a7f3d4f037f..7f7d19226aa9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.UnionOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -217,11 +218,13 @@ private void pushOperatorsAboveUnion(UnionOperator union, // each parent List fileDescLists = new ArrayList(); + // TODO# special case #N - unions for (Operator parent : parents) { FileSinkDesc fileSinkDesc = (FileSinkDesc) fileSinkOp.getConf().clone(); fileSinkDesc.setDirName(new Path(parentDirName, parent.getIdentifier())); fileSinkDesc.setLinkedFileSink(true); fileSinkDesc.setParentDir(parentDirName); + Utilities.LOG14535.info("Created LinkedFileSink for union " + fileSinkDesc.getDirName() + "; parent " + parentDirName); parent.setChildOperators(null); Operator tmpFileSinkOp = OperatorFactory.getAndMakeChild(fileSinkDesc, parent.getSchema(), parent); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index 0c160acf46eb..e1fc10365c05 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -65,7 +65,7 @@ public class GenTezProcContext implements NodeProcessorCtx{ public final ParseContext parseContext; public final HiveConf conf; - public final List> moveTask; + public final List> moveTask; // TODO# // rootTasks is the entry point for all generated tasks public final List> rootTasks; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 6715dbfc41b6..f4b23e6ae34b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -305,7 +305,9 @@ public static void removeUnionOperators(GenTezProcContext context, BaseWork work linked.add(desc); desc.setIndexInTezUnion(linked.size()); + // TODO# special case #N - unions (tez) desc.setDirName(new Path(path, "" + desc.getIndexInTezUnion())); + Utilities.LOG14535.info("removing union - new desc with " + desc.getDirName() + "; parent " + path); desc.setLinkedFileSink(true); 
desc.setParentDir(path); desc.setLinkedFileSinkDesc(linked); @@ -373,6 +375,8 @@ public static void processFileSink(GenTezProcContext context, FileSinkOperator f // If underlying data is RCFile or OrcFile, RCFileBlockMerge task or // OrcFileStripeMerge task would be created. LOG.info("using CombineHiveInputformat for the merge job"); + Utilities.LOG14535.info("merging files from " + fileSink.getConf().getDirName() + " to " + finalName); + // TODO# special case #N - merge GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, context.dependencyTask, context.moveTask, hconf, context.currentTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 66589fe8b6ec..c54a1719e3f1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryProperties; @@ -6542,6 +6543,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) LoadTableDesc ltd = null; ListBucketingCtx lbCtx = null; Map partSpec = null; + boolean isMmTable = false; switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6551,70 +6553,27 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) destTableIsTemporary = dest_tab.isTemporary(); // Is the user trying to insert into a external tables - if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && - (dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE))) { - throw new SemanticException( - ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); - } + checkExternalTable(dest_tab); partSpec = qbm.getPartSpecForAlias(dest); dest_path = dest_tab.getPath(); - // If the query here is an INSERT_INTO and the target is an immutable table, - // verify that our destination is empty before proceeding - if (dest_tab.isImmutable() && - qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){ - try { - FileSystem fs = dest_path.getFileSystem(conf); - if (! 
MetaStoreUtils.isDirEmpty(fs,dest_path)){ - LOG.warn("Attempted write into an immutable table : " - + dest_tab.getTableName() + " : " + dest_path); - throw new SemanticException( - ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); - } - } catch (IOException ioe) { - LOG.warn("Error while trying to determine if immutable table has any data : " - + dest_tab.getTableName() + " : " + dest_path); - throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); - } - } + checkImmutableTable(qb, dest_tab, dest_path, false); - // check for partition - List parts = dest_tab.getPartitionKeys(); - if (parts != null && parts.size() > 0) { // table is partitioned - if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition - throw new SemanticException(generateErrorMessage( - qb.getParseInfo().getDestForClause(dest), - ErrorMsg.NEED_PARTITION_ERROR.getMsg())); - } - dpCtx = qbm.getDPCtx(dest); - if (dpCtx == null) { - dest_tab.validatePartColumnNames(partSpec, false); - dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); - qbm.setDPCtx(dest, dpCtx); - } - - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP - throw new SemanticException(generateErrorMessage( - qb.getParseInfo().getDestForClause(dest), - ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); - } - if (dpCtx.getSPPath() != null) { - dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath()); - } - if ((dest_tab.getNumBuckets() > 0)) { - dpCtx.setNumBuckets(dest_tab.getNumBuckets()); - } + // Check for dynamic partitions. + dpCtx = checkDynPart(qb, qbm, dest_tab, partSpec, dest); + if (dpCtx != null && dpCtx.getSPPath() != null) { + dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath()); } boolean isNonNativeTable = dest_tab.isNonNative(); - if (isNonNativeTable) { + isMmTable = isMmTable(dest_tab); + if (isNonNativeTable || isMmTable) { queryTmpdir = dest_path; } else { queryTmpdir = ctx.getTempDirForPath(dest_path); } + Utilities.LOG14535.info("createFS for table specifying " + queryTmpdir + " from " + dest_path); if (dpCtx != null) { // set the root of the temporary path where dynamic partition columns will populate dpCtx.setRootPath(queryTmpdir); @@ -6641,9 +6600,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) acidOp = getAcidType(table_desc.getOutputFileFormatClass()); checkAcidConstraints(qb, table_desc, dest_tab); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp); - ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), - dest_tab.getTableName())); + boolean isReplace = !qb.getParseInfo().isInsertIntoTable( + dest_tab.getDbName(), dest_tab.getTableName()); + ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, isMmTable); ltd.setLbCtx(lbCtx); loadTableWork.add(ltd); } else { @@ -6652,42 +6611,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) setStatsForNonNativeTable(dest_tab); } - WriteEntity output = null; - - // Here only register the whole table for post-exec hook if no DP present - // in the case of DP, we will register WriteEntity in MoveTask when the - // list of dynamically created partitions are known. 
- if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) { - output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable)); - if (!outputs.add(output)) { - throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES - .getMsg(dest_tab.getTableName())); - } - } - if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) { - // No static partition specified - if (dpCtx.getNumSPCols() == 0) { - output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable), false); - outputs.add(output); - } - // part of the partition specified - // Create a DummyPartition in this case. Since, the metastore does not store partial - // partitions currently, we need to store dummy partitions - else { - try { - String ppath = dpCtx.getSPPath(); - ppath = ppath.substring(0, ppath.length() - 1); - DummyPartition p = - new DummyPartition(dest_tab, dest_tab.getDbName() - + "@" + dest_tab.getTableName() + "@" + ppath, - partSpec); - output = new WriteEntity(p, WriteEntity.WriteType.INSERT, false); - outputs.add(output); - } catch (HiveException e) { - throw new SemanticException(e.getMessage(), e); - } - } - } + WriteEntity output = generateTableWriteEntity( + dest_tab, partSpec, ltd, dpCtx, isNonNativeTable); ctx.getLoadTableOutputMap().put(ltd, output); break; @@ -6697,40 +6622,22 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part = qbm.getDestPartitionForAlias(dest); dest_tab = dest_part.getTable(); destTableIsAcid = AcidUtils.isAcidTable(dest_tab); - if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && - dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE)) { - throw new SemanticException( - ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); - } + + checkExternalTable(dest_tab); Path tabPath = dest_tab.getPath(); Path partPath = dest_part.getDataLocation(); - // If the query here is an INSERT_INTO and the target is an immutable table, - // verify that our destination is empty before proceeding - if (dest_tab.isImmutable() && - qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){ - try { - FileSystem fs = partPath.getFileSystem(conf); - if (! MetaStoreUtils.isDirEmpty(fs,partPath)){ - LOG.warn("Attempted write into an immutable table partition : " - + dest_tab.getTableName() + " : " + partPath); - throw new SemanticException( - ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); - } - } catch (IOException ioe) { - LOG.warn("Error while trying to determine if immutable table partition has any data : " - + dest_tab.getTableName() + " : " + partPath); - throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); - } - } + checkImmutableTable(qb, dest_tab, partPath, true); // if the table is in a different dfs than the partition, // replace the partition's dfs with the table's dfs. dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri() .getAuthority(), partPath.toUri().getPath()); - queryTmpdir = ctx.getTempDirForPath(dest_path); + isMmTable = isMmTable(dest_tab); + queryTmpdir = isMmTable ? 
dest_path : ctx.getTempDirForPath(dest_path); + Utilities.LOG14535.info("createFS for partition specifying " + queryTmpdir + " from " + dest_path); table_desc = Utilities.getTableDesc(dest_tab); // Add sorting/bucketing if needed @@ -6946,6 +6853,54 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx); } + FileSinkDesc fileSinkDesc = createFileSinkDesc(table_desc, dest_part, + dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, + destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, + canBeMerged, isMmTable); + + Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( + fileSinkDesc, fsRS, input), inputRR); + + handleLineage(ltd, output); + + if (LOG.isDebugEnabled()) { + LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " + + dest_path + " row schema: " + inputRR.toString()); + } + + FileSinkOperator fso = (FileSinkOperator) output; + fso.getConf().setTable(dest_tab); + fsopToTable.put(fso, dest_tab); + // the following code is used to collect column stats when + // hive.stats.autogather=true + // and it is an insert overwrite or insert into table + if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) + && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { + if (dest_type.intValue() == QBMetaData.DEST_TABLE) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() + .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb + .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + + } + } + return output; + } + + private static boolean isMmTable(Table table) { + // TODO: perhaps it should be a 3rd value for 'transactional'? 
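+    // TABLE_IS_MM resolves to the interim 'hivecommit' table property defined in
+    // hive_metastoreConstants earlier in this patch.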
+ String value = table.getProperty(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } + + private FileSinkDesc createFileSinkDesc(TableDesc table_desc, + Partition dest_part, Path dest_path, int currentTableId, + boolean destTableIsAcid, boolean destTableIsTemporary, + boolean destTableIsMaterialization, Path queryTmpdir, + SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, + RowSchema fsRS, boolean canBeMerged, boolean isMmTable) throws SemanticException { FileSinkDesc fileSinkDesc = new FileSinkDesc( queryTmpdir, table_desc, @@ -6957,7 +6912,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) rsCtx.getTotalFiles(), rsCtx.getPartnCols(), dpCtx, - dest_path); + dest_path, + isMmTable); fileSinkDesc.setHiveServerQuery(SessionState.get().isHiveServerQuery()); // If this is an insert, update, or delete on an ACID table then mark that so the @@ -7001,10 +6957,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else if (dpCtx != null) { fileSinkDesc.setStaticSpec(dpCtx.getSPPath()); } + return fileSinkDesc; + } - Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - fileSinkDesc, fsRS, input), inputRR); - + private void handleLineage(LoadTableDesc ltd, Operator output) + throws SemanticException { if (ltd != null && SessionState.get() != null) { SessionState.get().getLineageState() .mapDirToFop(ltd.getSourcePath(), (FileSinkOperator) output); @@ -7022,33 +6979,111 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) SessionState.get().getLineageState() .mapDirToFop(tlocation, (FileSinkOperator) output); } + } - if (LOG.isDebugEnabled()) { - LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " - + dest_path + " row schema: " + inputRR.toString()); + private WriteEntity generateTableWriteEntity(Table dest_tab, + Map partSpec, LoadTableDesc ltd, + DynamicPartitionCtx dpCtx, boolean isNonNativeTable) + throws SemanticException { + WriteEntity output = null; + + // Here only register the whole table for post-exec hook if no DP present + // in the case of DP, we will register WriteEntity in MoveTask when the + // list of dynamically created partitions are known. + if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) { + output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable)); + if (!outputs.add(output)) { + throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES + .getMsg(dest_tab.getTableName())); + } } + if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) { + // No static partition specified + if (dpCtx.getNumSPCols() == 0) { + output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable), false); + outputs.add(output); + } + // part of the partition specified + // Create a DummyPartition in this case. 
Since, the metastore does not store partial + // partitions currently, we need to store dummy partitions + else { + try { + String ppath = dpCtx.getSPPath(); + ppath = ppath.substring(0, ppath.length() - 1); + DummyPartition p = + new DummyPartition(dest_tab, dest_tab.getDbName() + + "@" + dest_tab.getTableName() + "@" + ppath, + partSpec); + output = new WriteEntity(p, WriteEntity.WriteType.INSERT, false); + outputs.add(output); + } catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); + } + } + } + return output; + } - FileSinkOperator fso = (FileSinkOperator) output; - fso.getConf().setTable(dest_tab); - fsopToTable.put(fso, dest_tab); - // the following code is used to collect column stats when - // hive.stats.autogather=true - // and it is an insert overwrite or insert into table - if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) - && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) - && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { - if (dest_type.intValue() == QBMetaData.DEST_TABLE) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() - .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); - } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb - .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + private void checkExternalTable(Table dest_tab) throws SemanticException { + if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && + (dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE))) { + throw new SemanticException( + ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); + } + } + private void checkImmutableTable(QB qb, Table dest_tab, Path dest_path, boolean isPart) + throws SemanticException { + // If the query here is an INSERT_INTO and the target is an immutable table, + // verify that our destination is empty before proceeding + if (!dest_tab.isImmutable() || !qb.getParseInfo().isInsertIntoTable( + dest_tab.getDbName(), dest_tab.getTableName())) { + return; + } + try { + FileSystem fs = dest_path.getFileSystem(conf); + if (! MetaStoreUtils.isDirEmpty(fs,dest_path)){ + LOG.warn("Attempted write into an immutable table : " + + dest_tab.getTableName() + " : " + dest_path); + throw new SemanticException( + ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); } + } catch (IOException ioe) { + LOG.warn("Error while trying to determine if immutable table " + + (isPart ? 
"partition " : "") + "has any data : " + dest_tab.getTableName() + + " : " + dest_path); + throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); } - return output; } + private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, + Map partSpec, String dest) throws SemanticException { + List parts = dest_tab.getPartitionKeys(); + if (parts == null || parts.isEmpty()) return null; // table is not partitioned + if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition + throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), + ErrorMsg.NEED_PARTITION_ERROR.getMsg())); + } + DynamicPartitionCtx dpCtx = qbm.getDPCtx(dest); + if (dpCtx == null) { + dest_tab.validatePartColumnNames(partSpec, false); + dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, + conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + qbm.setDPCtx(dest, dpCtx); + } + + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP + throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), + ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); + } + if ((dest_tab.getNumBuckets() > 0)) { + dpCtx.setNumBuckets(dest_tab.getNumBuckets()); + } + return dpCtx; + } + + private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc, Map partSpec, Operator curr, boolean isInsertInto) throws SemanticException { String tableName = table_desc.getTableName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index 68b0ad9ea63f..ffc9c3e5d97c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; /** * Conditional task resolution interface. 
This is invoked at run time to get the @@ -243,6 +244,7 @@ private void generateActualTasks(HiveConf conf, List partitionCols, final DynamicPartitionCtx dpCtx, Path destPath) { + final ArrayList partitionCols, final DynamicPartitionCtx dpCtx, Path destPath, + boolean isMmTable) { this.dirName = dirName; this.tableInfo = tableInfo; @@ -121,6 +123,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, this.dpCtx = dpCtx; this.dpSortState = DPSortState.NONE; this.destPath = destPath; + this.isMmTable = isMmTable; } public FileSinkDesc(final Path dirName, final TableDesc tableInfo, @@ -142,7 +145,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, public Object clone() throws CloneNotSupportedException { FileSinkDesc ret = new FileSinkDesc(dirName, tableInfo, compressed, destTableId, multiFileSpray, canBeMerged, numFiles, totalFiles, - partitionCols, dpCtx, destPath); + partitionCols, dpCtx, destPath, isMmTable); ret.setCompressCodec(compressCodec); ret.setCompressType(compressType); ret.setGatherStats(gatherStats); @@ -249,6 +252,10 @@ public void setTemporary(boolean temporary) { this.temporary = temporary; } + public boolean isMmTable() { + return isMmTable; + } + public boolean isMaterialization() { return materialization; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index df153a2c4ab3..5e4e1fecd20b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.PTFUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; /** * LoadFileDesc. @@ -55,6 +56,7 @@ public LoadFileDesc(final Path sourcePath, final Path targetDir, final boolean isDfsDir, final String columns, final String columnTypes) { super(sourcePath); + Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir, new Exception()); this.targetDir = targetDir; this.isDfsDir = isDfsDir; this.columns = columns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 771a919ccd0b..1ac831d59cbf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -41,18 +42,20 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc // Need to remember whether this is an acid compliant operation, and if so whether it is an // insert, update, or delete. 
private AcidUtils.Operation writeType; + private boolean isMmTable; // TODO: the below seems like they should just be combined into partitionDesc private org.apache.hadoop.hive.ql.plan.TableDesc table; private Map partitionSpec; // NOTE: this partitionSpec has to be ordered map - public LoadTableDesc(final Path sourcePath, + private LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, final AcidUtils.Operation writeType) { super(sourcePath); - init(table, partitionSpec, replace, writeType); + Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to " + table.getTableName(), new Exception()); + init(table, partitionSpec, replace, writeType, false); } /** @@ -91,13 +94,16 @@ public LoadTableDesc(final Path sourcePath, public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final DynamicPartitionCtx dpCtx, - final AcidUtils.Operation writeType) { + final AcidUtils.Operation writeType, + boolean isReplace, + boolean isMmTable) { super(sourcePath); + Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName(), new Exception()); this.dpCtx = dpCtx; if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) { - init(table, dpCtx.getPartSpec(), true, writeType); + init(table, dpCtx.getPartSpec(), isReplace, writeType, isMmTable); } else { - init(table, new LinkedHashMap(), true, writeType); + init(table, new LinkedHashMap(), isReplace, writeType, isMmTable); } } @@ -105,11 +111,12 @@ private void init( final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - AcidUtils.Operation writeType) { + AcidUtils.Operation writeType, boolean isMmTable) { this.table = table; this.partitionSpec = partitionSpec; this.replace = replace; this.writeType = writeType; + this.isMmTable = isMmTable; } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -135,6 +142,11 @@ public boolean getReplace() { return replace; } + @Explain(displayName = "micromanaged table") + public boolean isMmTable() { + return isMmTable; + } + public void setReplace(boolean replace) { this.replace = replace; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index 9f498c7fb88a..227b0d2231ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -34,6 +34,7 @@ */ @Explain(displayName = "Move Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class MoveWork implements Serializable { + // TODO# all the places where MoveWork is created need to be handled. private static final long serialVersionUID = 1L; private LoadTableDesc loadTableWork; private LoadFileDesc loadFileWork; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index a8d7c9c461a2..1c27873877c5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -285,7 +285,7 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, partColMap.put(PARTCOL_NAME, null); DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(null, partColMap, "Sunday", 100); //todo: does this need the finalDestination? 
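+      // The trailing 'false' below is the new isMmTable constructor argument.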
- desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null); + desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null, false); } else { desc = new FileSinkDesc(basePath, tableDesc, false); } diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q new file mode 100644 index 000000000000..aaf8d48770ae --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -0,0 +1,63 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.fetch.task.conversion=none; + +drop table simple_mm; +drop table partunion_mm; +drop table merge_mm; +drop table ctas_mm; +drop table T1; +drop table T2; +drop table skew_mm; + + +create table simple_mm(key int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); +insert into table simple_mm partition(key_mm='455') select key from src limit 3; + +create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; + +create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); + + +insert into table partunion_mm partition(key_mm) +select temps.* from ( +select key as key_mm, key from ctas_mm +union all +select key as key_mm, key from simple_mm ) temps; + +set hive.merge.mapredfiles=true; +set hive.merge.sparkfiles=true; +set hive.merge.tezfiles=true; + +CREATE TABLE merge_mm (key INT, value STRING) + PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); + +EXPLAIN +INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) + SELECT key, value, PMOD(HASH(key), 2) as part + FROM src; + +INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) + SELECT key, value, PMOD(HASH(key), 2) as part + FROM src; + + +set hive.optimize.skewjoin.compiletime = true; +-- the test case is wrong? 
+ +CREATE TABLE T1(key STRING, val STRING) +SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +CREATE TABLE T2(key STRING, val STRING) +SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; +LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; + +EXPLAIN +SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; + +create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); +INSERT OVERWRITE TABLE skew_mm +SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; + +-- TODO load, acid, etc diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q new file mode 100644 index 000000000000..882096b884ef --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -0,0 +1,11 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.fetch.task.conversion=none; + +drop table simple_mm; + + +create table simple_mm(key int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); +insert into table simple_mm partition(key_mm='455') select key from src limit 3; + diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out new file mode 100644 index 000000000000..129bb13affcf --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -0,0 +1,21 @@ +PREHOOK: query: drop table simple123 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table simple123 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table simple123(key int) partitioned by (key123 int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@simple123 +POSTHOOK: query: create table simple123(key int) partitioned by (key123 int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@simple123 +PREHOOK: query: insert into table simple123 partition(key123='455') select key from src limit 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@simple123@key123=455 +POSTHOOK: query: insert into table simple123 partition(key123='455') select key from src limit 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@simple123@key123=455 +POSTHOOK: Lineage: simple123 PARTITION(key123=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] From 87dcab470f33ace818c775da6b0a9f18b10f66ac Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Wed, 31 Aug 2016 16:06:03 -0700 Subject: [PATCH 02/24] HIVE-14636 : pass information from FSOP/TezTask to commit to take care of speculative execution and failed tasks (Sergey Shelukhin) --- .../apache/hadoop/hive/common/FileUtils.java | 6 +- .../hadoop/hive/common/HiveStatsUtils.java | 14 +- .../org/apache/hadoop/hive/ql/Context.java | 9 +- .../ql/exec/AbstractFileMergeOperator.java | 4 +- .../hadoop/hive/ql/exec/FileSinkOperator.java | 194 +++++++++-- .../apache/hadoop/hive/ql/exec/MoveTask.java | 315 ++++++++++-------- .../apache/hadoop/hive/ql/exec/Utilities.java | 10 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 2 +- .../hive/ql/parse/SemanticAnalyzer.java | 5 +- .../hadoop/hive/ql/parse/TypeCheckCtx.java | 2 +- .../hadoop/hive/ql/plan/FileSinkDesc.java | 9 + .../hadoop/hive/ql/plan/LoadFileDesc.java | 2 
+-
 .../hadoop/hive/ql/plan/LoadTableDesc.java         |  19 +-
 .../test/queries/clientpositive/mm_current.q       |  18 +-
 .../clientpositive/llap/mm_current.q.out           | 133 +++++++-
 15 files changed, 517 insertions(+), 225 deletions(-)

diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
index 3ed2d086fd8e..ad436109c73e 100644
--- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
@@ -329,9 +329,13 @@ public static String unescapePathName(String path) {
    */
   public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus,
       List<FileStatus> results) throws IOException {
+    listStatusRecursively(fs, fileStatus, HIDDEN_FILES_PATH_FILTER, results);
+  }
+
+  public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus,
+      PathFilter filter, List<FileStatus> results) throws IOException {
     if (fileStatus.isDir()) {
-      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) {
-        listStatusRecursively(fs, stat, results);
+      for (FileStatus stat : fs.listStatus(fileStatus.getPath(), filter)) {
+        listStatusRecursively(fs, stat, filter, results);
       }
     } else {
diff --git a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
index 7c9d72fbd2d0..111d99c143c1 100644
--- a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
+++ b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java
@@ -25,6 +25,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -50,15 +51,20 @@ public class HiveStatsUtils {
    * @return array of FileStatus
    * @throws IOException
    */
-  public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs)
+  public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs)
       throws IOException {
+    return getFileStatusRecurse(path, level, fs, FileUtils.HIDDEN_FILES_PATH_FILTER);
+  }
+
+  public static FileStatus[] getFileStatusRecurse(
+      Path path, int level, FileSystem fs, PathFilter filter) throws IOException {
     // if level is <0, then return all files/directories under the specified path
-    if ( level < 0) {
+    if (level < 0) {
       List<FileStatus> result = new ArrayList<FileStatus>();
       try {
         FileStatus fileStatus = fs.getFileStatus(path);
-        FileUtils.listStatusRecursively(fs, fileStatus, result);
+        FileUtils.listStatusRecursively(fs, fileStatus, filter, result);
       } catch (IOException e) {
         // globStatus() API returns empty FileStatus[] when the specified path
         // does not exist. But getFileStatus() throws IOException.
To mimic the @@ -75,7 +81,7 @@ public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem sb.append(Path.SEPARATOR).append("*"); } Path pathPattern = new Path(path, sb.toString()); - return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER); + return fs.globStatus(pathPattern, filter); } public static int getNumBitVectorsForNDVEstimation(Configuration conf) throws Exception { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index ceb257c84569..1013f7c07e19 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -233,7 +233,8 @@ private Path getStagingDir(Path inputPath, boolean mkdir) { // Append task specific info to stagingPathName, instead of creating a sub-directory. // This way we don't have to worry about deleting the stagingPathName separately at // end of query execution. - dir = fs.makeQualified(new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID())); + // TODO# HERE + dir = fs.makeQualified(new Path(stagingPathName + "_" + getExecutionPrefix())); LOG.debug("Created staging dir = " + dir + " for path = " + inputPath); @@ -819,6 +820,10 @@ public void setSkipTableMasking(boolean skipTableMasking) { this.skipTableMasking = skipTableMasking; } + public String getExecutionPrefix() { + return this.executionId + "-" + TaskRunner.getTaskRunnerID(); + } + public ExplainConfiguration getExplainConfig() { return explainConfig; } @@ -827,7 +832,7 @@ public void setExplainConfig(ExplainConfiguration explainConfig) { this.explainConfig = explainConfig; } - public void resetOpContext(){ + public void resetOpContext() { opContext = new CompilationOpContext(); sequencer = new AtomicInteger(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index dfad6c192947..40c784bf7673 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -254,8 +254,8 @@ public void jobCloseOp(Configuration hconf, boolean success) Path outputDir = conf.getOutputPath(); FileSystem fs = outputDir.getFileSystem(hconf); Path backupPath = backupOutputPath(fs, outputDir); - Utilities - .mvFileToFinalPath(outputDir, hconf, success, LOG, conf.getDpCtx(), + // TODO# merge-related move + Utilities.mvFileToFinalPath(outputDir, hconf, success, LOG, conf.getDpCtx(), null, reporter); if (success) { LOG.info("jobCloseOp moved merged files to output dir: " + outputDir); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 1f5dfea90f45..b8a2c5ae7872 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -27,16 +27,22 @@ import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import 
org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -79,6 +85,7 @@
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hive.common.util.HiveStringUtils;
 import org.slf4j.Logger;
@@ -92,6 +99,7 @@
 public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     Serializable {

+  private static final String MANIFEST_EXTENSION = ".manifest";
   public static final Logger LOG = LoggerFactory.getLogger(FileSinkOperator.class);
   private static final boolean isInfoEnabled = LOG.isInfoEnabled();
   private static final boolean isDebugEnabled = LOG.isDebugEnabled();
@@ -165,7 +173,7 @@ public FSPaths(Path specPath, boolean isMmTable) {
       }
       Utilities.LOG14535.info("new FSPaths for " + numFiles + " files, dynParts = " + bDynParts
           + ": tmpPath " + tmpPath + ", task path " + taskOutputTempPath
-          + " (spec path " + specPath + ")", new Exception());
+          + " (spec path " + specPath + ")"/*, new Exception()*/);

       outPaths = new Path[numFiles];
       finalPaths = new Path[numFiles];
@@ -187,7 +195,7 @@ public Path getTaskOutPath(String taskId) {
     /**
      * Update the final paths according to tmpPath.
      */
-    public Path getFinalPath(String taskId, Path tmpPath, String extension) {
+    private Path getFinalPath(String taskId, Path tmpPath, String extension) {
       if (extension != null) {
         return new Path(tmpPath, taskId + extension);
       } else {
@@ -218,41 +226,64 @@ public void closeWriters(boolean abort) throws HiveException {
     }

     private void commit(FileSystem fs) throws HiveException {
-      if (isMmTable) return; // TODO#: need to propagate to MoveTask instead
+      List<Path> commitPaths = null;
+      if (isMmTable) {
+        commitPaths = new ArrayList<>();
+      }
       for (int idx = 0; idx < outPaths.length; ++idx) {
         try {
-          if ((bDynParts || isSkewedStoredAsSubDirectories)
-              && !fs.exists(finalPaths[idx].getParent())) {
-            Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent());
-            fs.mkdirs(finalPaths[idx].getParent());
-          }
-          boolean needToRename = true;
-          if (conf.getWriteType() == AcidUtils.Operation.UPDATE ||
-              conf.getWriteType() == AcidUtils.Operation.DELETE) {
-            // If we're updating or deleting there may be no file to close. This can happen
-            // because the where clause strained out all of the records for a given bucket. So
-            // before attempting the rename below, check if our file exists. If it doesn't,
-            // then skip the rename. If it does try it. We could just blindly try the rename
-            // and avoid the extra stat, but that would mask other errors.
-            try {
-              if (outPaths[idx] != null) {
-                FileStatus stat = fs.getFileStatus(outPaths[idx]);
-              }
-            } catch (FileNotFoundException fnfe) {
-              needToRename = false;
-            }
-          }
-          Utilities.LOG14535.info("commit potentially moving " + outPaths[idx] + " to " + finalPaths[idx]);
-          if (needToRename && outPaths[idx] != null && !fs.rename(outPaths[idx], finalPaths[idx])) {
-            throw new HiveException("Unable to rename output from: " +
-                outPaths[idx] + " to: " + finalPaths[idx]);
-          }
-          updateProgress();
+          commitOneOutPath(idx, fs, commitPaths);
         } catch (IOException e) {
           throw new HiveException("Unable to rename output from: " +
               outPaths[idx] + " to: " + finalPaths[idx], e);
         }
       }
+      if (isMmTable) {
+        Path manifestPath = new Path(specPath, "_tmp." + getPrefixedTaskId() + MANIFEST_EXTENSION);
+        Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths);
+        try {
+          try (FSDataOutputStream out = fs.create(manifestPath)) {
+            out.writeInt(commitPaths.size());
+            for (Path path : commitPaths) {
+              out.writeUTF(path.toString());
+            }
+          }
+        } catch (IOException e) {
+          throw new HiveException(e);
+        }
+      }
+    }
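    // MM commit protocol in brief: each task writes its files directly under
    // the final directory, named with the query's execution prefix (see
    // getPrefixedTaskId() below), and then records exactly what it produced in
    // a "_tmp.<executionPrefix>_<taskId>.manifest" side file. Job close keeps
    // only manifest-listed files, so speculative and failed attempts leave
    // nothing behind. A standalone reader for the manifest format written
    // above; an illustrative sketch, not part of this patch:
    //
    //   static List<String> readManifest(FileSystem fs, Path manifestPath)
    //       throws IOException {
    //     // Mirrors the writer: an int count, then count UTF-encoded paths.
    //     try (FSDataInputStream in = fs.open(manifestPath)) {
    //       int count = in.readInt();
    //       List<String> files = new ArrayList<>(count);
    //       for (int i = 0; i < count; ++i) {
    //         files.add(in.readUTF());
    //       }
    //       return files;
    //     }
    //   }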
+
+    private String getPrefixedTaskId() {
+      return conf.getExecutionPrefix() + "_" + taskId;
+    }
+
+    private void commitOneOutPath(int idx, FileSystem fs, List<Path> commitPaths)
+        throws IOException, HiveException {
+      if ((bDynParts || isSkewedStoredAsSubDirectories)
+          && !fs.exists(finalPaths[idx].getParent())) {
+        Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent());
+        fs.mkdirs(finalPaths[idx].getParent());
+      }
+      // If we're updating or deleting there may be no file to close. This can happen
+      // because the where clause strained out all of the records for a given bucket. So
+      // before attempting the rename below, check if our file exists. If it doesn't,
+      // then skip the rename. If it does try it. We could just blindly try the rename
+      // and avoid the extra stat, but that would mask other errors.
+      boolean needToRename = (conf.getWriteType() != AcidUtils.Operation.UPDATE &&
+          conf.getWriteType() != AcidUtils.Operation.DELETE) || fs.exists(outPaths[idx]);
+      if (needToRename && outPaths[idx] != null) {
+        Utilities.LOG14535.info("committing " + outPaths[idx] + " to " + finalPaths[idx] + " (" + isMmTable + ")");
+        if (isMmTable) {
+          assert outPaths[idx].equals(finalPaths[idx]);
+          commitPaths.add(outPaths[idx]);
+        } else if (!fs.rename(outPaths[idx], finalPaths[idx])) {
+          throw new HiveException("Unable to rename output from: " +
+              outPaths[idx] + " to: " + finalPaths[idx]);
+        }
+      }
+
+      updateProgress();
+    }

     public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws HiveException {
@@ -297,10 +328,10 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT
           outPaths[filesIdx] = getTaskOutPath(taskId);
         } else {
           if (!bDynParts && !isSkewedStoredAsSubDirectories) {
-            finalPaths[filesIdx] = getFinalPath(taskId, specPath, extension);
+            finalPaths[filesIdx] = getFinalPath(getPrefixedTaskId(), specPath, extension);
           } else { // TODO# wrong!
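            // What the TODO above flags: for MM tables this dynamic-partition /
            // skewed branch resolves finalPaths against the table-level specPath,
            // exactly like the non-partitioned branch above it, instead of the
            // per-partition subdirectory that the non-MM code path uses; the two
            // branches below are otherwise identical.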
- finalPaths[filesIdx] = getFinalPath(taskId, specPath, extension); + finalPaths[filesIdx] = getFinalPath(getPrefixedTaskId(), specPath, extension); } outPaths[filesIdx] = finalPaths[filesIdx]; } @@ -638,7 +669,7 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories); Utilities.LOG14535.info("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx] + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path " - + fsp.getTmpPath() + ", task " + taskId + ")", new Exception()); + + fsp.getTmpPath() + ", task " + taskId + ")"/*, new Exception()*/); if (isInfoEnabled) { LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); @@ -1150,9 +1181,13 @@ public void jobCloseOp(Configuration hconf, boolean success) DynamicPartitionCtx dpCtx = conf.getDynPartCtx(); if (conf.isLinkedFileSink() && (dpCtx != null)) { specPath = conf.getParentDir(); + Utilities.LOG14535.info("Setting specPath to " + specPath + " for dynparts"); + } + if (!conf.isMmTable()) { + Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); // TODO# other callers + } else { + handleMmTable(specPath, hconf, success, dpCtx, conf, reporter); } - Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, - reporter); } } catch (IOException e) { throw new HiveException(e); @@ -1160,6 +1195,95 @@ public void jobCloseOp(Configuration hconf, boolean success) super.jobCloseOp(hconf, success); } + private static class ExecPrefixPathFilter implements PathFilter { + private final String prefix, tmpPrefix; + public ExecPrefixPathFilter(String prefix) { + this.prefix = prefix; + this.tmpPrefix = "_tmp." + prefix; + } + + @Override + public boolean accept(Path path) { + String name = path.getName(); + return name.startsWith(prefix) || name.startsWith(tmpPrefix); + } + } + + + private void handleMmTable(Path specPath, Configuration hconf, boolean success, + DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter) + throws IOException, HiveException { + FileSystem fs = specPath.getFileSystem(hconf); + int targetLevel = (dpCtx == null) ? 
1 : dpCtx.getNumDPCols();
+    if (!success) {
+      FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(
+          specPath, targetLevel, fs, new ExecPrefixPathFilter(conf.getExecutionPrefix()));
+      for (FileStatus status : statuses) {
+        Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
+        tryDelete(fs, status.getPath());
+      }
+      return;
+    }
+    FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(
+        specPath, targetLevel, fs, new ExecPrefixPathFilter(conf.getExecutionPrefix()));
+    if (statuses == null) return;
+    LinkedList<FileStatus> results = new LinkedList<>();
+    List<Path> manifests = new ArrayList<>(statuses.length);
+    for (FileStatus status : statuses) {
+      if (status.getPath().getName().endsWith(MANIFEST_EXTENSION)) {
+        manifests.add(status.getPath());
+      } else {
+        results.add(status);
+      }
+    }
+    HashSet<String> committed = new HashSet<>();
+    for (Path mfp : manifests) {
+      try (FSDataInputStream mdis = fs.open(mfp)) {
+        int fileCount = mdis.readInt();
+        for (int i = 0; i < fileCount; ++i) {
+          String nextFile = mdis.readUTF();
+          if (!committed.add(nextFile)) {
+            throw new HiveException(nextFile + " was specified in multiple manifests");
+          }
+        }
+      }
+    }
+    Iterator<FileStatus> iter = results.iterator();
+    while (iter.hasNext()) {
+      FileStatus rfs = iter.next();
+      if (!committed.remove(rfs.getPath().toString())) {
+        iter.remove();
+        Utilities.LOG14535.info("Deleting " + rfs.getPath() + " that was not committed");
+        tryDelete(fs, rfs.getPath());
+      }
+    }
+    if (!committed.isEmpty()) {
+      throw new HiveException("The following files were committed but not found: " + committed);
+    }
+    for (Path mfp : manifests) {
+      Utilities.LOG14535.info("Deleting manifest " + mfp);
+      tryDelete(fs, mfp);
+    }
+
+    if (results.isEmpty()) return;
+    FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]);
+
+    List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
+        fs, finalResults, dpCtx, conf, hconf);
+    // create empty buckets if necessary
+    if (emptyBuckets.size() > 0) {
+      Utilities.createEmptyBuckets(hconf, emptyBuckets, conf, reporter);
+    }
+  }
+
+  private void tryDelete(FileSystem fs, Path path) {
+    try {
+      fs.delete(path, false);
+    } catch (IOException ex) {
+      LOG.error("Failed to delete " + path, ex);
+    }
+  }
+
   @Override
   public OperatorType getType() {
     return OperatorType.FILESINK;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 2ab97f7e9b52..e3646dae63fb 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -241,6 +241,18 @@ public boolean hasFollowingStatsTask() {
     return false;
   }

+  private final static class TaskInformation {
+    public List<BucketCol> bucketCols = null;
+    public List<SortCol> sortCols = null;
+    public int numBuckets = -1;
+    public Task<? extends Serializable> task;
+    public String path;
+    public TaskInformation(Task<? extends Serializable> task, String path) {
+      this.task = task;
+      this.path = path;
+    }
+  }
+
   @Override
   public int execute(DriverContext driverContext) {

@@ -318,155 +330,15 @@ public int execute(DriverContext driverContext) {
         LOG.info("Partition is: " + tbd.getPartitionSpec().toString());

         // Check if the bucketing and/or sorting columns were inferred
-        List<BucketCol> bucketCols = null;
-        List<SortCol> sortCols = null;
-        int numBuckets = -1;
-        Task<? extends Serializable> task = this;
-        String path = tbd.getSourcePath().toUri().toString();
-        // Find the first ancestor of this MoveTask which is some form of map reduce task
-        // (Either standard, local, or a merge)
-        while (task.getParentTasks() != null &&
task.getParentTasks().size() == 1) { - task = (Task)task.getParentTasks().get(0); - // If it was a merge task or a local map reduce task, nothing can be inferred - if (task instanceof MergeFileTask || task instanceof MapredLocalTask) { - break; - } - - // If it's a standard map reduce task, check what, if anything, it inferred about - // the directory this move task is moving - if (task instanceof MapRedTask) { - MapredWork work = (MapredWork)task.getWork(); - MapWork mapWork = work.getMapWork(); - bucketCols = mapWork.getBucketedColsByDirectory().get(path); - sortCols = mapWork.getSortedColsByDirectory().get(path); - if (work.getReduceWork() != null) { - numBuckets = work.getReduceWork().getNumReduceTasks(); - } - - if (bucketCols != null || sortCols != null) { - // This must be a final map reduce task (the task containing the file sink - // operator that writes the final output) - assert work.isFinalMapRed(); - } - break; - } - - // If it's a move task, get the path the files were moved from, this is what any - // preceding map reduce task inferred information about, and moving does not invalidate - // those assumptions - // This can happen when a conditional merge is added before the final MoveTask, but the - // condition for merging is not met, see GenMRFileSink1. - if (task instanceof MoveTask) { - if (((MoveTask)task).getWork().getLoadFileWork() != null) { - path = ((MoveTask)task).getWork().getLoadFileWork().getSourcePath().toUri().toString(); - } - } - } + TaskInformation ti = new TaskInformation(this, tbd.getSourcePath().toUri().toString()); + inferTaskInformation(ti); // deal with dynamic partitions DynamicPartitionCtx dpCtx = tbd.getDPCtx(); if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions - - List> dps = Utilities.getFullDPSpecs(conf, dpCtx); - - // publish DP columns to its subscribers - if (dps != null && dps.size() > 0) { - pushFeed(FeedType.DYNAMIC_PARTITIONS, dps); - } - console.printInfo(System.getProperty("line.separator")); - long startTime = System.currentTimeMillis(); - // load the list of DP partitions and return the list of partition specs - // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions - // to use Utilities.getFullDPSpecs() to get the list of full partSpecs. - // After that check the number of DPs created to not exceed the limit and - // iterate over it and call loadPartition() here. - // The reason we don't do inside HIVE-1361 is the latter is large and we - // want to isolate any potential issue it may introduce. - Map, Partition> dp = - db.loadDynamicPartitions( - tbd.getSourcePath(), - tbd.getTable().getTableName(), - tbd.getPartitionSpec(), - tbd.getReplace(), - dpCtx.getNumDPCols(), - isSkewedStoredAsDirs(tbd), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, - SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(), - work.getLoadTableWork().getWriteType()); - - console.printInfo("\t Time taken to load dynamic partitions: " + - (System.currentTimeMillis() - startTime)/1000.0 + " seconds"); - - if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) { - throw new HiveException("This query creates no partitions." 
+ - " To turn off this error, set hive.error.on.empty.partition=false."); - } - - startTime = System.currentTimeMillis(); - // for each partition spec, get the partition - // and put it to WriteEntity for post-exec hook - for(Map.Entry, Partition> entry : dp.entrySet()) { - Partition partn = entry.getValue(); - - if (bucketCols != null || sortCols != null) { - updatePartitionBucketSortColumns( - db, table, partn, bucketCols, numBuckets, sortCols); - } - - WriteEntity enty = new WriteEntity(partn, - (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : - WriteEntity.WriteType.INSERT)); - if (work.getOutputs() != null) { - work.getOutputs().add(enty); - } - // Need to update the queryPlan's output as well so that post-exec hook get executed. - // This is only needed for dynamic partitioning since for SP the the WriteEntity is - // constructed at compile time and the queryPlan already contains that. - // For DP, WriteEntity creation is deferred at this stage so we need to update - // queryPlan here. - if (queryPlan.getOutputs() == null) { - queryPlan.setOutputs(new LinkedHashSet()); - } - queryPlan.getOutputs().add(enty); - - // update columnar lineage for each partition - dc = new DataContainer(table.getTTable(), partn.getTPartition()); - - // Don't set lineage on delete as we don't have all the columns - if (SessionState.get() != null && - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE && - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) { - SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc, - table.getCols()); - } - LOG.info("\tLoading partition " + entry.getKey()); - } - console.printInfo("\t Time taken for adding to write entity : " + - (System.currentTimeMillis() - startTime)/1000.0 + " seconds"); - dc = null; // reset data container to prevent it being added again. + dc = handleDynParts(db, table, tbd, ti, dpCtx); } else { // static partitions - List partVals = MetaStoreUtils.getPvals(table.getPartCols(), - tbd.getPartitionSpec()); - db.validatePartitionNameCharacters(partVals); - Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + " into " + tbd.getTable()); - db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(), - tbd.getPartitionSpec(), tbd.getReplace(), - tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask(), tbd.isMmTable()); - Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); - - if (bucketCols != null || sortCols != null) { - updatePartitionBucketSortColumns(db, table, partn, bucketCols, - numBuckets, sortCols); - } - - dc = new DataContainer(table.getTTable(), partn.getTPartition()); - // add this partition to post-execution hook - if (work.getOutputs() != null) { - work.getOutputs().add(new WriteEntity(partn, - (tbd.getReplace() ? 
WriteEntity.WriteType.INSERT_OVERWRITE
-                : WriteEntity.WriteType.INSERT)));
-        }
-      }
+        dc = handleStaticParts(db, table, tbd, ti);
+      }
     }
     if (SessionState.get() != null && dc != null) {
       // If we are doing an update or a delete the number of columns in the table will not
@@ -500,6 +372,159 @@ public int execute(DriverContext driverContext) {
     }
   }

+  private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd,
+      TaskInformation ti) throws HiveException, IOException, InvalidOperationException {
+    List<String> partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec());
+    db.validatePartitionNameCharacters(partVals);
+    Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + " into " + tbd.getTable());
+    db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
+        tbd.getPartitionSpec(), tbd.getReplace(),
+        tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
+        work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask(), tbd.isMmTable());
+    Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
+
+    if (ti.bucketCols != null || ti.sortCols != null) {
+      updatePartitionBucketSortColumns(db, table, partn, ti.bucketCols,
+          ti.numBuckets, ti.sortCols);
+    }
+
+    DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition());
+    // add this partition to post-execution hook
+    if (work.getOutputs() != null) {
+      work.getOutputs().add(new WriteEntity(partn,
+          (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE
+              : WriteEntity.WriteType.INSERT)));
+    }
+    return dc;
+  }
+
+  private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd,
+      TaskInformation ti, DynamicPartitionCtx dpCtx) throws HiveException,
+      IOException, InvalidOperationException {
+    DataContainer dc;
+    List<LinkedHashMap<String, String>> dps = Utilities.getFullDPSpecs(conf, dpCtx);
+
+    // publish DP columns to its subscribers
+    if (dps != null && dps.size() > 0) {
+      pushFeed(FeedType.DYNAMIC_PARTITIONS, dps);
+    }
+    console.printInfo(System.getProperty("line.separator"));
+    long startTime = System.currentTimeMillis();
+    // load the list of DP partitions and return the list of partition specs
+    // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions
+    // to use Utilities.getFullDPSpecs() to get the list of full partSpecs.
+    // After that check the number of DPs created to not exceed the limit and
+    // iterate over it and call loadPartition() here.
+    // The reason we don't do this inside HIVE-1361 is that the latter is large and we
+    // want to isolate any potential issue it may introduce.
+    Map<Map<String, String>, Partition> dp =
+        db.loadDynamicPartitions(
+            tbd.getSourcePath(),
+            tbd.getTable().getTableName(),
+            tbd.getPartitionSpec(),
+            tbd.getReplace(),
+            dpCtx.getNumDPCols(),
+            isSkewedStoredAsDirs(tbd),
+            work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
+            SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
+            work.getLoadTableWork().getWriteType());
+
+    console.printInfo("\t Time taken to load dynamic partitions: " +
+        (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
+
+    if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) {
+      throw new HiveException("This query creates no partitions."
+          + " To turn off this error, set hive.error.on.empty.partition=false.");
+    }
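    // Shape of the map returned by loadDynamicPartitions above: one entry per
    // partition that was actually created or found, keyed by its full partition
    // spec, e.g. {ds=123, part=0} -> Partition(default@merge_mm@ds=123/part=0).
    // The loop below turns each entry into a WriteEntity so that post-execution
    // hooks see the dynamically created partitions.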
+ + " To turn off this error, set hive.error.on.empty.partition=false."); + } + + startTime = System.currentTimeMillis(); + // for each partition spec, get the partition + // and put it to WriteEntity for post-exec hook + for(Map.Entry, Partition> entry : dp.entrySet()) { + Partition partn = entry.getValue(); + + if (ti.bucketCols != null || ti.sortCols != null) { + updatePartitionBucketSortColumns( + db, table, partn, ti.bucketCols, ti.numBuckets, ti.sortCols); + } + + WriteEntity enty = new WriteEntity(partn, + (tbd.getReplace() ? WriteEntity.WriteType.INSERT_OVERWRITE : + WriteEntity.WriteType.INSERT)); + if (work.getOutputs() != null) { + work.getOutputs().add(enty); + } + // Need to update the queryPlan's output as well so that post-exec hook get executed. + // This is only needed for dynamic partitioning since for SP the the WriteEntity is + // constructed at compile time and the queryPlan already contains that. + // For DP, WriteEntity creation is deferred at this stage so we need to update + // queryPlan here. + if (queryPlan.getOutputs() == null) { + queryPlan.setOutputs(new LinkedHashSet()); + } + queryPlan.getOutputs().add(enty); + + // update columnar lineage for each partition + dc = new DataContainer(table.getTTable(), partn.getTPartition()); + + // Don't set lineage on delete as we don't have all the columns + if (SessionState.get() != null && + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE && + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) { + SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc, + table.getCols()); + } + LOG.info("\tLoading partition " + entry.getKey()); + } + console.printInfo("\t Time taken for adding to write entity : " + + (System.currentTimeMillis() - startTime)/1000.0 + " seconds"); + dc = null; // reset data container to prevent it being added again. + return dc; + } + + private void inferTaskInformation(TaskInformation ti) { + // Find the first ancestor of this MoveTask which is some form of map reduce task + // (Either standard, local, or a merge) + while (ti.task.getParentTasks() != null && ti.task.getParentTasks().size() == 1) { + ti.task = (Task)ti.task.getParentTasks().get(0); + // If it was a merge task or a local map reduce task, nothing can be inferred + if (ti.task instanceof MergeFileTask || ti.task instanceof MapredLocalTask) { + break; + } + + // If it's a standard map reduce task, check what, if anything, it inferred about + // the directory this move task is moving + if (ti.task instanceof MapRedTask) { + MapredWork work = (MapredWork)ti.task.getWork(); + MapWork mapWork = work.getMapWork(); + ti.bucketCols = mapWork.getBucketedColsByDirectory().get(ti.path); + ti.sortCols = mapWork.getSortedColsByDirectory().get(ti.path); + if (work.getReduceWork() != null) { + ti.numBuckets = work.getReduceWork().getNumReduceTasks(); + } + + if (ti.bucketCols != null || ti.sortCols != null) { + // This must be a final map reduce task (the task containing the file sink + // operator that writes the final output) + assert work.isFinalMapRed(); + } + break; + } + + // If it's a move task, get the path the files were moved from, this is what any + // preceding map reduce task inferred information about, and moving does not invalidate + // those assumptions + // This can happen when a conditional merge is added before the final MoveTask, but the + // condition for merging is not met, see GenMRFileSink1. 
+ if (ti.task instanceof MoveTask) { + MoveTask mt = (MoveTask)ti.task; + if (mt.getWork().getLoadFileWork() != null) { + ti.path = mt.getWork().getLoadFileWork().getSourcePath().toUri().toString(); + } + } + } + } + private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) throws HiveException { if (work.getCheckFileFormat()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index a7f7b9f1fa6a..427f0675d24f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -1409,7 +1409,6 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, Path tmpPath = Utilities.toTempPath(specPath); Path taskTmpPath = Utilities.toTaskTempPath(specPath); if (success) { - // TODO# specPath instead of tmpPath FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse( tmpPath, ((dpCtx == null) ? 1 : dpCtx.getNumDPCols()), fs); if(statuses != null && statuses.length > 0) { @@ -1423,8 +1422,6 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, Utilities.LOG14535.info("Moving tmp dir: " + tmpPath + " to: " + specPath); Utilities.renameOrMoveFiles(fs, tmpPath, specPath); } - List paths = new ArrayList<>(); - // TODO#: HERE listFilesToCommit(specPath, fs, paths); } else { Utilities.LOG14535.info("deleting tmpPath " + tmpPath); fs.delete(tmpPath, true); @@ -1445,7 +1442,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, * @throws HiveException * @throws IOException */ - private static void createEmptyBuckets(Configuration hconf, List paths, + static void createEmptyBuckets(Configuration hconf, List paths, FileSinkDesc conf, Reporter reporter) throws HiveException, IOException { @@ -1586,19 +1583,18 @@ public static HashMap removeTempOrDuplicateFiles(FileStatus[ for (FileStatus one : items) { if (isTempPath(one)) { - Utilities.LOG14535.info("removeTempOrDuplicateFiles deleting " + one.getPath(), new Exception()); + Utilities.LOG14535.info("removeTempOrDuplicateFiles deleting " + one.getPath()/*, new Exception()*/); if (!fs.delete(one.getPath(), true)) { throw new IOException("Unable to delete tmp file: " + one.getPath()); } } else { String taskId = getPrefixedTaskIdFromFilename(one.getPath().getName()); - Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId, new Exception()); + Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId/*, new Exception()*/); FileStatus otherFile = taskIdToFile.get(taskId); if (otherFile == null) { taskIdToFile.put(taskId, one); } else { - // TODO# file choice! 
// Compare the file sizes of all the attempt files for the same task, the largest win // any attempt files could contain partial results (due to task failures or // speculative runs), but the largest should be the correct one since the result diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 7d8c961a53e8..e43c60026bc7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1878,7 +1878,7 @@ public Void call() throws Exception { Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName()); Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true, listBucketingEnabled, - false, isAcid, hasFollowingStatsTask, false); // TODO# here + false, isAcid, hasFollowingStatsTask, false); // TODO# special case #N partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 6ed379a01af9..499530e6c0ba 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6658,7 +6658,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) acidOp = getAcidType(table_desc.getOutputFileFormatClass()); checkAcidConstraints(qb, table_desc, dest_tab); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp); + ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, isMmTable); ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); ltd.setLbCtx(lbCtx); @@ -6860,6 +6860,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, isMmTable); + if (isMmTable) { + fileSinkDesc.setExecutionPrefix(ctx.getExecutionPrefix()); + } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( fileSinkDesc, fsRS, input), inputRR); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java index 02896ff6a61e..26f1d7055b71 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java @@ -160,7 +160,7 @@ public void setError(String error, ASTNode errorSrcNode) { if (LOG.isDebugEnabled()) { // Logger the callstack from which the error has been set. LOG.debug("Setting error: [" + error + "] from " - + ((errorSrcNode == null) ? "null" : errorSrcNode.toStringTree()), new Exception()); + + ((errorSrcNode == null) ? 
"null" : errorSrcNode.toStringTree())/*, new Exception()*/); } this.error = error; this.errorSrcNode = errorSrcNode; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 0a4848b42f86..f51999dfeeba 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -97,6 +97,7 @@ public enum DPSortState { private Path destPath; private boolean isHiveServerQuery; private boolean isMmTable; + private String executionPrefix; public FileSinkDesc() { } @@ -158,6 +159,7 @@ public Object clone() throws CloneNotSupportedException { ret.setWriteType(writeType); ret.setTransactionId(txnId); ret.setStatsTmpDir(statsTmpDir); + ret.setExecutionPrefix(executionPrefix); return ret; } @@ -481,4 +483,11 @@ public void setStatsTmpDir(String statsCollectionTempDir) { this.statsTmpDir = statsCollectionTempDir; } + public String getExecutionPrefix() { + return this.executionPrefix; + } + + public void setExecutionPrefix(String value) { + this.executionPrefix = value; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 5e4e1fecd20b..5cad65c22e3d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -56,7 +56,7 @@ public LoadFileDesc(final Path sourcePath, final Path targetDir, final boolean isDfsDir, final String columns, final String columnTypes) { super(sourcePath); - Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir, new Exception()); + Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir/*, new Exception()*/); this.targetDir = targetDir; this.isDfsDir = isDfsDir; this.columns = columns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 1ac831d59cbf..3b491978ec56 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -52,10 +52,10 @@ private LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - final AcidUtils.Operation writeType) { + final AcidUtils.Operation writeType, boolean isMmTable) { super(sourcePath); - Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to " + table.getTableName(), new Exception()); - init(table, partitionSpec, replace, writeType, false); + Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); + init(table, partitionSpec, replace, writeType, isMmTable); } /** @@ -69,14 +69,16 @@ public LoadTableDesc(final Path sourcePath, final TableDesc table, final Map partitionSpec, final boolean replace) { - this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID); + // TODO# we assume mm=false here + this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, false); } public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, - final AcidUtils.Operation writeType) { - this(sourcePath, table, partitionSpec, true, writeType); + final AcidUtils.Operation writeType, boolean isMmTable) { + // TODO# we assume mm=false here + this(sourcePath, table, 
partitionSpec, true, writeType, isMmTable); } /** @@ -88,7 +90,8 @@ public LoadTableDesc(final Path sourcePath, public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec) { - this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID); + // TODO# we assume mm=false here + this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, false); } public LoadTableDesc(final Path sourcePath, @@ -98,7 +101,7 @@ public LoadTableDesc(final Path sourcePath, boolean isReplace, boolean isMmTable) { super(sourcePath); - Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName(), new Exception()); + Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); this.dpCtx = dpCtx; if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) { init(table, dpCtx.getPartSpec(), isReplace, writeType, isMmTable); diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index 882096b884ef..11259cb1617a 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -2,10 +2,20 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; set hive.exec.dynamic.partition.mode=nonstrict; set hive.fetch.task.conversion=none; +set tez.grouping.min-size=1; +set tez.grouping.max-size=2; +set hive.tez.auto.reducer.parallelism=false; -drop table simple_mm; - +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select key from src limit 3; +insert into table intermediate partition(p='456') select key from src limit 3; +insert into table intermediate partition(p='457') select key from src limit 3; -create table simple_mm(key int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); -insert into table simple_mm partition(key_mm='455') select key from src limit 3; +create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); + +explain insert into table simple_mm partition(key_mm='455') select key from intermediate; +insert into table simple_mm partition(key_mm='455') select key from intermediate; + +drop table simple_mm; +drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 129bb13affcf..8f1af4ca7063 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -1,21 +1,128 @@ -PREHOOK: query: drop table simple123 -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table simple123 -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table simple123(key int) partitioned by (key123 int) tblproperties ('hivecommit'='true') +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@simple123 -POSTHOOK: query: create table simple123(key int) partitioned by (key123 int) tblproperties ('hivecommit'='true') +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@simple123 -PREHOOK: query: insert into table simple123 partition(key123='455') select key from src 
limit 3 +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select key from src limit 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 3 PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: default@simple123@key123=455 -POSTHOOK: query: insert into table simple123 partition(key123='455') select key from src limit 3 +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: default@simple123@key123=455 -POSTHOOK: Lineage: simple123 PARTITION(key123=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='457') select key from src limit 3 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert into table intermediate partition(p='457') select key from src limit 3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@simple_mm +POSTHOOK: query: create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@simple_mm +PREHOOK: query: explain insert into table simple_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table simple_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: intermediate + Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.simple_mm + Execution mode: llap + LLAP IO: 
all inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + key_mm 455 + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.simple_mm + micromanaged table: true + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert into table simple_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@simple_mm@key_mm=455 +POSTHOOK: query: insert into table simple_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@simple_mm@key_mm=455 +POSTHOOK: Lineage: simple_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@simple_mm +PREHOOK: Output: default@simple_mm +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@simple_mm +POSTHOOK: Output: default@simple_mm +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate From 30fd19f4b358f12fbbb21d75785817c20e1adf15 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Wed, 7 Sep 2016 18:51:39 -0700 Subject: [PATCH 03/24] HIVE-14637 : edit or split MoveTask to commit job results to metastore (Sergey Shelukhin) --- .../hive/ql/history/TestHiveHistory.java | 2 +- metastore/if/hive_metastore.thrift | 36 +- .../upgrade/derby/037-HIVE-14637.derby.sql | 6 + .../upgrade/derby/hive-schema-2.2.0.derby.sql | 12 +- .../derby/upgrade-2.1.0-to-2.2.0.derby.sql | 2 + .../upgrade/mssql/022-HIVE-14637.mssql.sql | 14 + .../upgrade/mssql/hive-schema-2.2.0.mssql.sql | 21 +- .../mssql/upgrade-2.1.0-to-2.2.0.mssql.sql | 2 + .../upgrade/mysql/037-HIVE-14637.mysql.sql | 14 + .../upgrade/mysql/hive-schema-2.2.0.mysql.sql | 15 + .../mysql/upgrade-2.1.0-to-2.2.0.mysql.sql | 2 + .../upgrade/oracle/037-HIVE-14637.oracle.sql | 14 + .../oracle/hive-schema-2.2.0.oracle.sql | 18 +- .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql | 2 + .../postgres/036-HIVE-14637.postgres.sql | 15 + .../postgres/hive-schema-2.2.0.postgres.sql | 22 +- .../upgrade-2.1.0-to-2.2.0.postgres.sql | 2 + .../thrift/gen-cpp/ThriftHiveMetastore.cpp | 4269 +++++++----- .../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 378 ++ .../ThriftHiveMetastore_server.skeleton.cpp | 15 + .../gen-cpp/hive_metastore_constants.cpp | 2 + .../thrift/gen-cpp/hive_metastore_constants.h | 1 + .../thrift/gen-cpp/hive_metastore_types.cpp | 1130 +++- .../gen/thrift/gen-cpp/hive_metastore_types.h | 292 +- .../metastore/api/FinalizeWriteIdRequest.java | 684 ++ .../metastore/api/FinalizeWriteIdResult.java | 283 + .../metastore/api/GetNextWriteIdRequest.java | 490 ++ .../metastore/api/GetNextWriteIdResult.java | 387 ++ .../api/HeartbeatWriteIdRequest.java | 589 ++ 
.../metastore/api/HeartbeatWriteIdResult.java | 283 + .../hadoop/hive/metastore/api/Table.java | 206 +- .../metastore/api/ThriftHiveMetastore.java | 5924 ++++++++++++----- .../api/hive_metastoreConstants.java | 30 +- .../gen-php/metastore/ThriftHiveMetastore.php | 966 ++- .../gen/thrift/gen-php/metastore/Types.php | 805 ++- .../hive_metastore/ThriftHiveMetastore-remote | 21 + .../hive_metastore/ThriftHiveMetastore.py | 567 ++ .../thrift/gen-py/hive_metastore/constants.py | 1 + .../thrift/gen-py/hive_metastore/ttypes.py | 478 +- .../thrift/gen-rb/hive_metastore_constants.rb | 2 + .../gen/thrift/gen-rb/hive_metastore_types.rb | 122 +- .../thrift/gen-rb/thrift_hive_metastore.rb | 162 + .../hadoop/hive/metastore/HiveMetaStore.java | 203 +- .../hive/metastore/HiveMetaStoreClient.java | 116 +- .../hive/metastore/IMetaStoreClient.java | 7 + .../hadoop/hive/metastore/ObjectStore.java | 116 +- .../hadoop/hive/metastore/RawStore.java | 7 + .../hive/metastore/hbase/HBaseStore.java | 19 + .../hadoop/hive/metastore/model/MTable.java | 23 +- .../hive/metastore/model/MTableWrite.java | 67 + metastore/src/model/package.jdo | 29 + .../DummyRawStoreControlledCommit.java | 15 + .../DummyRawStoreForJdoConnection.java | 14 + .../org/apache/hadoop/hive/ql/Context.java | 8 +- .../hadoop/hive/ql/exec/FileSinkOperator.java | 22 +- .../apache/hadoop/hive/ql/exec/MoveTask.java | 28 +- .../apache/hadoop/hive/ql/io/AcidUtils.java | 10 + .../apache/hadoop/hive/ql/metadata/Hive.java | 118 +- .../hive/ql/optimizer/GenMapRedUtils.java | 1 + .../AnnotateRunTimeStatsOptimizer.java | 3 +- .../hive/ql/parse/DDLSemanticAnalyzer.java | 2 + .../hive/ql/parse/ImportSemanticAnalyzer.java | 2 + .../hive/ql/parse/LoadSemanticAnalyzer.java | 1 + .../hadoop/hive/ql/parse/ParseContext.java | 8 - .../hive/ql/parse/SemanticAnalyzer.java | 42 +- .../hadoop/hive/ql/parse/TaskCompiler.java | 1 + .../hadoop/hive/ql/plan/FileSinkDesc.java | 24 +- .../hadoop/hive/ql/plan/LoadTableDesc.java | 32 +- .../apache/hadoop/hive/ql/plan/MoveWork.java | 1 - .../hadoop/hive/ql/exec/TestExecDriver.java | 2 +- .../hive/ql/exec/TestFileSinkOperator.java | 2 +- .../test/queries/clientpositive/mm_current.q | 26 +- .../clientpositive/llap/mm_current.q.out | 171 +- 73 files changed, 15320 insertions(+), 4086 deletions(-) create mode 100644 metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql create mode 100644 metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql create mode 100644 metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql create mode 100644 metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql create mode 100644 metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java create mode 100644 metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java diff --git 
a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
index 76c16367a070..0c51a68a4838 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java
@@ -103,7 +103,7 @@ protected void setUp() {
         db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true);
         db.createTable(src, cols, null, TextInputFormat.class,
             IgnoreKeyTextOutputFormat.class);
-        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false);
+        db.loadTable(hadoopDataFile[i], src, false, false, false, false, false, null);
         i++;
       }
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 872c0f396681..95eee271cff5 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -305,7 +305,9 @@ struct Table {
   11: string viewExpandedText,         // expanded view text, null for non-view
   12: string tableType,                // table type enum, e.g. EXTERNAL_TABLE
   13: optional PrincipalPrivilegeSet privileges,
-  14: optional bool temporary=false
+  14: optional bool temporary=false,
+  15: optional i64 mmNextWriteId,
+  16: optional i64 mmWatermarkWriteId
 }

 struct Partition {
@@ -890,6 +892,33 @@ struct CacheFileMetadataRequest {
   4: optional bool isAllParts
 }

+
+struct GetNextWriteIdRequest {
+  1: required string dbName,
+  2: required string tblName
+}
+struct GetNextWriteIdResult {
+  1: required i64 writeId
+}
+
+struct FinalizeWriteIdRequest {
+  1: required string dbName,
+  2: required string tblName,
+  3: required i64 writeId,
+  4: required bool commit
+}
+struct FinalizeWriteIdResult {
+}
+
+struct HeartbeatWriteIdRequest {
+  1: required string dbName,
+  2: required string tblName,
+  3: required i64 writeId
+}
+struct HeartbeatWriteIdResult {
+}
+
+
 struct GetAllFunctionsResponse {
   1: optional list<Function> functions
 }
@@ -1438,6 +1467,9 @@ service ThriftHiveMetastore extends fb303.FacebookService
   ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req)
   CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req)

+  GetNextWriteIdResult get_next_write_id(1:GetNextWriteIdRequest req)
+  FinalizeWriteIdResult finalize_write_id(1:FinalizeWriteIdRequest req)
+  HeartbeatWriteIdResult heartbeat_write_id(1:HeartbeatWriteIdRequest req)
 }

 // * Note about the DDL_TIME: When creating or altering a table or a partition,
@@ -1476,5 +1508,7 @@ const string META_TABLE_STORAGE = "storage_handler",
 const string TABLE_IS_TRANSACTIONAL = "transactional",
 const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",
 const string TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties",
+const string TABLE_IS_MM = "hivecommit",
+
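
Taken together, the three RPCs above bracket a job's file writes: get_next_write_id reserves a write ID for a table (presumably advancing the new mmNextWriteId field), heartbeat_write_id keeps an in-flight write from looking abandoned, and finalize_write_id records the outcome as a commit or an abort. The extra trailing argument threaded through db.loadTable() in the first hunk is presumably this write ID. A minimal sketch of the round trip against the Thrift-generated Java client; the host, port, and table names are placeholders, the constructors are the standard Thrift-generated all-field ones, and a real caller would go through HiveMetaStoreClient and add error handling:

import org.apache.hadoop.hive.metastore.api.FinalizeWriteIdRequest;
import org.apache.hadoop.hive.metastore.api.GetNextWriteIdRequest;
import org.apache.hadoop.hive.metastore.api.HeartbeatWriteIdRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class WriteIdRoundTrip {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 9083); // placeholder metastore endpoint
    transport.open();
    ThriftHiveMetastore.Client msc =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    // Reserve the next write ID for default.simple_mm.
    long writeId = msc.get_next_write_id(
        new GetNextWriteIdRequest("default", "simple_mm")).getWriteId();

    // While the files are being written, renew the heartbeat periodically
    // so the metastore does not treat the write as abandoned.
    msc.heartbeat_write_id(
        new HeartbeatWriteIdRequest("default", "simple_mm", writeId));

    // Once the job finishes, commit (true) or abort (false) the write.
    msc.finalize_write_id(
        new FinalizeWriteIdRequest("default", "simple_mm", writeId, true));

    transport.close();
  }
}

The heartbeat would normally run on a timer for long jobs; a single call is shown here only to exercise the RPC.
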
diff --git a/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql
new file mode 100644
index 000000000000..8cea9f162d09
--- /dev/null
+++ b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql
@@ -0,0 +1,6 @@
+ALTER TABLE "TBLS" ADD "MM_WATERMARK_WRITE_ID" BIGINT;
+ALTER TABLE "TBLS" ADD "MM_NEXT_WRITE_ID" BIGINT;
+CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "LAST_HEARTBEAT" BIGINT);
+ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
+ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID");
diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
index ae980e0899df..3c4ba4b55f55 100644
--- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql
@@ -60,7 +60,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "

 CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));

-CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR);
+CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "MM_WATERMARK_WRITE_ID" BIGINT, "MM_NEXT_WRITE_ID" BIGINT);

 CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
@@ -112,6 +112,16 @@ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY

 CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");

+CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "LAST_HEARTBEAT" BIGINT);
+
+ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID");
+
+ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+
+CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID");
+
+
+
 -- ----------------------------------------------
 -- DDL Statements for indexes
 -- ----------------------------------------------
diff --git a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
index 25a5e37c2708..67750a6e2176 100644
--- a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
+++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql
@@ -1,3 +1,5 @@
 -- Upgrade MetaStore schema from 2.1.0 to 2.2.0
+RUN '037-HIVE-14637.derby.sql';
+
 UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
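
The TBL_WRITES bookkeeping table has the same shape in all five supported metastore databases (Derby above; SQL Server, MySQL, Oracle and Postgres follow): one row per write, keyed by a surrogate TW_ID, unique per (TBL_ID, WRITE_ID), with a one-character STATE and a LAST_HEARTBEAT timestamp that the heartbeat RPC presumably refreshes. A sketch of the row lifecycle this layout appears designed for, as plain JDBC against the Derby definition; the 'O'/'C'/'A' state codes and all IDs are invented for illustration, since the real codes live in the ObjectStore/MTableWrite changes that this excerpt does not show:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class TblWritesLifecycle {
  public static void main(String[] args) throws Exception {
    // Placeholder JDBC URL for an embedded Derby metastore database.
    try (Connection c = DriverManager.getConnection("jdbc:derby:metastore_db")) {
      // Open the write: one row per (table, write ID), enforced by UNIQUEWRITE.
      try (PreparedStatement ps = c.prepareStatement(
          "INSERT INTO TBL_WRITES (TW_ID, TBL_ID, WRITE_ID, STATE, LAST_HEARTBEAT)"
          + " VALUES (?, ?, ?, 'O', ?)")) {
        ps.setLong(1, 1L);                         // surrogate key
        ps.setLong(2, 42L);                        // TBLS.TBL_ID of the MM table
        ps.setLong(3, 7L);                         // write ID from get_next_write_id
        ps.setLong(4, System.currentTimeMillis());
        ps.executeUpdate();
      }
      // Heartbeat: refresh LAST_HEARTBEAT so the write is not expired.
      try (PreparedStatement ps = c.prepareStatement(
          "UPDATE TBL_WRITES SET LAST_HEARTBEAT = ? WHERE TBL_ID = ? AND WRITE_ID = ?")) {
        ps.setLong(1, System.currentTimeMillis());
        ps.setLong(2, 42L);
        ps.setLong(3, 7L);
        ps.executeUpdate();
      }
      // Finalize: flip STATE to committed ('C') or aborted ('A').
      try (PreparedStatement ps = c.prepareStatement(
          "UPDATE TBL_WRITES SET STATE = 'C' WHERE TBL_ID = ? AND WRITE_ID = ?")) {
        ps.setLong(1, 42L);
        ps.setLong(2, 7L);
        ps.executeUpdate();
      }
    }
  }
}

Only the metastore itself would touch this table directly; clients stay on the Thrift RPCs sketched earlier.
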
diff --git a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql
new file mode 100644
index 000000000000..bb429357e567
--- /dev/null
+++ b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql
@@ -0,0 +1,14 @@
+ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID BIGINT NULL;
+ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID BIGINT NULL;
+
+CREATE TABLE TBL_WRITES
+(
+    TW_ID BIGINT NOT NULL,
+    TBL_ID BIGINT NOT NULL,
+    WRITE_ID BIGINT NOT NULL,
+    STATE CHAR(1) NOT NULL,
+    LAST_HEARTBEAT BIGINT
+);
+ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
+ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
index 8735b506f85e..6bd0d8780595 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql
@@ -358,7 +358,9 @@ CREATE TABLE TBLS
     TBL_NAME nvarchar(128) NULL,
     TBL_TYPE nvarchar(128) NULL,
     VIEW_EXPANDED_TEXT text NULL,
-    VIEW_ORIGINAL_TEXT text NULL
+    VIEW_ORIGINAL_TEXT text NULL,
+    MM_WATERMARK_WRITE_ID BIGINT NULL,
+    MM_NEXT_WRITE_ID BIGINT NULL
 );

 ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
@@ -591,6 +593,23 @@ CREATE TABLE NOTIFICATION_SEQUENCE

 ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);

+
+CREATE TABLE TBL_WRITES
+(
+    TW_ID BIGINT NOT NULL,
+    TBL_ID BIGINT NOT NULL,
+    WRITE_ID BIGINT NOT NULL,
+    STATE CHAR(1) NOT NULL,
+    LAST_HEARTBEAT BIGINT
+);
+
+ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID);
+
+ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+
+CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID);
+
+
 -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]

 -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
index df972065ddef..2e6f39447722 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql
@@ -1,4 +1,6 @@
 SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
+:r 022-HIVE-14637.mssql.sql
+
 UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE;
diff --git a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql
new file mode 100644
index 000000000000..1b740d5694dd
--- /dev/null
+++ b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql
@@ -0,0 +1,14 @@
+alter table `TBLS` ADD COLUMN `MM_WATERMARK_WRITE_ID` bigint(20);
+alter table `TBLS` ADD COLUMN `MM_NEXT_WRITE_ID` bigint(20);
+
+CREATE TABLE IF NOT EXISTS `TBL_WRITES`
+(
+  `TW_ID` BIGINT NOT NULL,
+  `TBL_ID` BIGINT NOT NULL,
+  `WRITE_ID` BIGINT NOT NULL,
+  `STATE` CHAR(1) NOT NULL,
+  `LAST_HEARTBEAT` BIGINT,
+  PRIMARY KEY (`TW_ID`),
+  UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`),
+  CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
index 91e221d8db06..f7ef94886d26 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql
@@ -587,6 +587,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` (
   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
   `VIEW_EXPANDED_TEXT` mediumtext,
   `VIEW_ORIGINAL_TEXT`
mediumtext, + `MM_WATERMARK_WRITE_ID` bigint(20), + `MM_NEXT_WRITE_ID` bigint(20), PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), KEY `TBLS_N50` (`SD_ID`), @@ -827,6 +829,19 @@ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE; +CREATE TABLE IF NOT EXISTS `TBL_WRITES` +( + `TW_ID` BIGINT NOT NULL, + `TBL_ID` BIGINT NOT NULL, + `WRITE_ID` BIGINT NOT NULL, + `STATE` CHAR(1) NOT NULL, + `LAST_HEARTBEAT` BIGINT, + PRIMARY KEY (`TW_ID`), + UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`), + CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + -- ---------------------------- -- Transaction and Lock Tables -- ---------------------------- diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql index de38b58dbe08..6ac1b8931142 100644 --- a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; +SOURCE 037-HIVE-14637.mysql.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; diff --git a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql new file mode 100644 index 000000000000..bc5fb6b689e7 --- /dev/null +++ b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql @@ -0,0 +1,14 @@ +ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID NUMBER; +ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID NUMBER; + +CREATE TABLE TBL_WRITES +( + TW_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL, + WRITE_ID NUMBER NOT NULL, + STATE CHAR(1) NOT NULL, + LAST_HEARTBEAT NUMBER NOT NULL +); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql index 39ba7cb3b5e8..503ce09e0bf6 100644 --- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql @@ -375,7 +375,9 @@ CREATE TABLE TBLS TBL_NAME VARCHAR2(128) NULL, TBL_TYPE VARCHAR2(128) NULL, VIEW_EXPANDED_TEXT CLOB NULL, - VIEW_ORIGINAL_TEXT CLOB NULL + VIEW_ORIGINAL_TEXT CLOB NULL, + MM_WATERMARK_WRITE_ID NUMBER NULL, + MM_NEXT_WRITE_ID NUMBER NULL ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); @@ -797,6 +799,20 @@ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAIN CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); +CREATE TABLE TBL_WRITES +( + TW_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL, + WRITE_ID NUMBER NOT NULL, + STATE CHAR(1) NOT NULL, + LAST_HEARTBEAT NUMBER NOT NULL +); + +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); + +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); ------------------------------ -- Transaction 
and lock tables diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql index 66784a4e0ec2..8d841d6f7ea5 100644 --- a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; +@037-HIVE-14637.oracle.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; diff --git a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql new file mode 100644 index 000000000000..d94c19d729ad --- /dev/null +++ b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql @@ -0,0 +1,15 @@ + +ALTER TABLE "TBLS" ADD COLUMN "MM_WATERMARK_WRITE_ID" bigint; +ALTER TABLE "TBLS" ADD COLUMN "MM_NEXT_WRITE_ID" bigint; + +CREATE TABLE "TBL_WRITES" +( + "TW_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "WRITE_ID" BIGINT NOT NULL, + "STATE" CHAR(1) NOT NULL, + "LAST_HEARTBEAT" BIGINT NOT NULL +); +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID"); diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql index 63ac3befc2b4..bf1d76960d68 100644 --- a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql @@ -372,7 +372,9 @@ CREATE TABLE "TBLS" ( "TBL_NAME" character varying(128) DEFAULT NULL::character varying, "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, "VIEW_EXPANDED_TEXT" text, - "VIEW_ORIGINAL_TEXT" text + "VIEW_ORIGINAL_TEXT" text, + "MM_WATERMARK_WRITE_ID" bigint, + "MM_NEXT_WRITE_ID" bigint ); @@ -604,6 +606,24 @@ CREATE TABLE "KEY_CONSTRAINTS" CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID"); + + +CREATE TABLE "TBL_WRITES" +( + "TW_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "WRITE_ID" BIGINT NOT NULL, + "STATE" CHAR(1) NOT NULL, + "LAST_HEARTBEAT" BIGINT NOT NULL +); + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID"); + + -- -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: -- diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql index 0b4591d5aabf..70542b8a6e6a 100644 --- a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0'; +\i 036-HIVE-14637.postgres.sql; + UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1; SELECT 
'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0'; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 5a35a50e7922..5ed3912f00f5 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size781; - ::apache::thrift::protocol::TType _etype784; - xfer += iprot->readListBegin(_etype784, _size781); - this->success.resize(_size781); - uint32_t _i785; - for (_i785 = 0; _i785 < _size781; ++_i785) + uint32_t _size793; + ::apache::thrift::protocol::TType _etype796; + xfer += iprot->readListBegin(_etype796, _size793); + this->success.resize(_size793); + uint32_t _i797; + for (_i797 = 0; _i797 < _size793; ++_i797) { - xfer += iprot->readString(this->success[_i785]); + xfer += iprot->readString(this->success[_i797]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter786; - for (_iter786 = this->success.begin(); _iter786 != this->success.end(); ++_iter786) + std::vector ::const_iterator _iter798; + for (_iter798 = this->success.begin(); _iter798 != this->success.end(); ++_iter798) { - xfer += oprot->writeString((*_iter786)); + xfer += oprot->writeString((*_iter798)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size787; - ::apache::thrift::protocol::TType _etype790; - xfer += iprot->readListBegin(_etype790, _size787); - (*(this->success)).resize(_size787); - uint32_t _i791; - for (_i791 = 0; _i791 < _size787; ++_i791) + uint32_t _size799; + ::apache::thrift::protocol::TType _etype802; + xfer += iprot->readListBegin(_etype802, _size799); + (*(this->success)).resize(_size799); + uint32_t _i803; + for (_i803 = 0; _i803 < _size799; ++_i803) { - xfer += iprot->readString((*(this->success))[_i791]); + xfer += iprot->readString((*(this->success))[_i803]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size792; - ::apache::thrift::protocol::TType _etype795; - xfer += iprot->readListBegin(_etype795, _size792); - this->success.resize(_size792); - uint32_t _i796; - for (_i796 = 0; _i796 < _size792; ++_i796) + uint32_t _size804; + ::apache::thrift::protocol::TType _etype807; + xfer += iprot->readListBegin(_etype807, _size804); + this->success.resize(_size804); + uint32_t _i808; + for (_i808 = 0; _i808 < _size804; ++_i808) { - xfer += iprot->readString(this->success[_i796]); + xfer += iprot->readString(this->success[_i808]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter797; - for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797) + std::vector ::const_iterator _iter809; + for (_iter809 = this->success.begin(); _iter809 != this->success.end(); ++_iter809) { - xfer += oprot->writeString((*_iter797)); + xfer += oprot->writeString((*_iter809)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size798; - ::apache::thrift::protocol::TType _etype801; - xfer += iprot->readListBegin(_etype801, _size798); - (*(this->success)).resize(_size798); - uint32_t _i802; - for (_i802 = 0; _i802 < _size798; ++_i802) + uint32_t _size810; + ::apache::thrift::protocol::TType _etype813; + xfer += iprot->readListBegin(_etype813, _size810); + (*(this->success)).resize(_size810); + uint32_t _i814; + for (_i814 = 0; _i814 < _size810; ++_i814) { - xfer += iprot->readString((*(this->success))[_i802]); + xfer += iprot->readString((*(this->success))[_i814]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size803; - ::apache::thrift::protocol::TType _ktype804; - ::apache::thrift::protocol::TType _vtype805; - xfer += iprot->readMapBegin(_ktype804, _vtype805, _size803); - uint32_t _i807; - for (_i807 = 0; _i807 < _size803; ++_i807) + uint32_t _size815; + ::apache::thrift::protocol::TType _ktype816; + ::apache::thrift::protocol::TType _vtype817; + xfer += iprot->readMapBegin(_ktype816, _vtype817, _size815); + uint32_t _i819; + for (_i819 = 0; _i819 < _size815; ++_i819) { - std::string _key808; - xfer += iprot->readString(_key808); - Type& _val809 = this->success[_key808]; - xfer += _val809.read(iprot); + std::string _key820; + xfer += iprot->readString(_key820); + Type& _val821 = this->success[_key820]; + xfer += _val821.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter810; - for (_iter810 = this->success.begin(); _iter810 != this->success.end(); ++_iter810) + std::map ::const_iterator _iter822; + for (_iter822 = this->success.begin(); _iter822 != this->success.end(); ++_iter822) { - xfer += oprot->writeString(_iter810->first); - xfer += _iter810->second.write(oprot); + xfer += oprot->writeString(_iter822->first); + xfer += _iter822->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size811; - ::apache::thrift::protocol::TType _ktype812; - ::apache::thrift::protocol::TType _vtype813; - xfer += iprot->readMapBegin(_ktype812, _vtype813, _size811); - uint32_t _i815; - for (_i815 = 0; _i815 < _size811; ++_i815) + uint32_t _size823; + ::apache::thrift::protocol::TType _ktype824; + ::apache::thrift::protocol::TType _vtype825; + xfer 
+= iprot->readMapBegin(_ktype824, _vtype825, _size823); + uint32_t _i827; + for (_i827 = 0; _i827 < _size823; ++_i827) { - std::string _key816; - xfer += iprot->readString(_key816); - Type& _val817 = (*(this->success))[_key816]; - xfer += _val817.read(iprot); + std::string _key828; + xfer += iprot->readString(_key828); + Type& _val829 = (*(this->success))[_key828]; + xfer += _val829.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size818; - ::apache::thrift::protocol::TType _etype821; - xfer += iprot->readListBegin(_etype821, _size818); - this->success.resize(_size818); - uint32_t _i822; - for (_i822 = 0; _i822 < _size818; ++_i822) + uint32_t _size830; + ::apache::thrift::protocol::TType _etype833; + xfer += iprot->readListBegin(_etype833, _size830); + this->success.resize(_size830); + uint32_t _i834; + for (_i834 = 0; _i834 < _size830; ++_i834) { - xfer += this->success[_i822].read(iprot); + xfer += this->success[_i834].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter823; - for (_iter823 = this->success.begin(); _iter823 != this->success.end(); ++_iter823) + std::vector ::const_iterator _iter835; + for (_iter835 = this->success.begin(); _iter835 != this->success.end(); ++_iter835) { - xfer += (*_iter823).write(oprot); + xfer += (*_iter835).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size824; - ::apache::thrift::protocol::TType _etype827; - xfer += iprot->readListBegin(_etype827, _size824); - (*(this->success)).resize(_size824); - uint32_t _i828; - for (_i828 = 0; _i828 < _size824; ++_i828) + uint32_t _size836; + ::apache::thrift::protocol::TType _etype839; + xfer += iprot->readListBegin(_etype839, _size836); + (*(this->success)).resize(_size836); + uint32_t _i840; + for (_i840 = 0; _i840 < _size836; ++_i840) { - xfer += (*(this->success))[_i828].read(iprot); + xfer += (*(this->success))[_i840].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size829; - ::apache::thrift::protocol::TType _etype832; - xfer += iprot->readListBegin(_etype832, _size829); - this->success.resize(_size829); - uint32_t _i833; - for (_i833 = 0; _i833 < _size829; ++_i833) + uint32_t _size841; + ::apache::thrift::protocol::TType _etype844; + xfer += iprot->readListBegin(_etype844, _size841); + this->success.resize(_size841); + uint32_t _i845; + for (_i845 = 0; _i845 < _size841; ++_i845) { - xfer += this->success[_i833].read(iprot); + xfer += this->success[_i845].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter834; - for (_iter834 = this->success.begin(); _iter834 != this->success.end(); ++_iter834) + std::vector ::const_iterator _iter846; + for (_iter846 = this->success.begin(); _iter846 != this->success.end(); ++_iter846) { - xfer += (*_iter834).write(oprot); + xfer += (*_iter846).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _etype838; - xfer += iprot->readListBegin(_etype838, _size835); - (*(this->success)).resize(_size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size847; + ::apache::thrift::protocol::TType _etype850; + xfer += iprot->readListBegin(_etype850, _size847); + (*(this->success)).resize(_size847); + uint32_t _i851; + for (_i851 = 0; _i851 < _size847; ++_i851) { - xfer += (*(this->success))[_i839].read(iprot); + xfer += (*(this->success))[_i851].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size840; - ::apache::thrift::protocol::TType _etype843; - xfer += iprot->readListBegin(_etype843, _size840); - this->success.resize(_size840); - uint32_t _i844; - for (_i844 = 0; _i844 < _size840; ++_i844) + uint32_t _size852; + ::apache::thrift::protocol::TType _etype855; + xfer += iprot->readListBegin(_etype855, _size852); + this->success.resize(_size852); + uint32_t _i856; + for (_i856 = 0; _i856 < _size852; ++_i856) { - xfer += this->success[_i844].read(iprot); + xfer += this->success[_i856].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter845; - for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) + std::vector ::const_iterator _iter857; + for (_iter857 = this->success.begin(); _iter857 != this->success.end(); ++_iter857) { - xfer += (*_iter845).write(oprot); + xfer += (*_iter857).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - (*(this->success)).resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size858; + ::apache::thrift::protocol::TType _etype861; + xfer += iprot->readListBegin(_etype861, _size858); + (*(this->success)).resize(_size858); + uint32_t _i862; + for (_i862 = 0; _i862 < _size858; ++_i862) { - xfer += (*(this->success))[_i850].read(iprot); + xfer += (*(this->success))[_i862].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->success.clear(); - uint32_t _size851; - ::apache::thrift::protocol::TType _etype854; - xfer += iprot->readListBegin(_etype854, _size851); - this->success.resize(_size851); - uint32_t _i855; - for (_i855 = 0; _i855 < _size851; ++_i855) + uint32_t _size863; + ::apache::thrift::protocol::TType _etype866; + xfer += iprot->readListBegin(_etype866, _size863); + this->success.resize(_size863); + uint32_t _i867; + for (_i867 = 0; _i867 < _size863; ++_i867) { - xfer += this->success[_i855].read(iprot); + xfer += this->success[_i867].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter856; - for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856) + std::vector ::const_iterator _iter868; + for (_iter868 = this->success.begin(); _iter868 != this->success.end(); ++_iter868) { - xfer += (*_iter856).write(oprot); + xfer += (*_iter868).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size857; - ::apache::thrift::protocol::TType _etype860; - xfer += iprot->readListBegin(_etype860, _size857); - (*(this->success)).resize(_size857); - uint32_t _i861; - for (_i861 = 0; _i861 < _size857; ++_i861) + uint32_t _size869; + ::apache::thrift::protocol::TType _etype872; + xfer += iprot->readListBegin(_etype872, _size869); + (*(this->success)).resize(_size869); + uint32_t _i873; + for (_i873 = 0; _i873 < _size869; ++_i873) { - xfer += (*(this->success))[_i861].read(iprot); + xfer += (*(this->success))[_i873].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size862; - ::apache::thrift::protocol::TType _etype865; - xfer += iprot->readListBegin(_etype865, _size862); - this->primaryKeys.resize(_size862); - uint32_t _i866; - for (_i866 = 0; _i866 < _size862; ++_i866) + uint32_t _size874; + ::apache::thrift::protocol::TType _etype877; + xfer += iprot->readListBegin(_etype877, _size874); + this->primaryKeys.resize(_size874); + uint32_t _i878; + for (_i878 = 0; _i878 < _size874; ++_i878) { - xfer += this->primaryKeys[_i866].read(iprot); + xfer += this->primaryKeys[_i878].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size867; - ::apache::thrift::protocol::TType _etype870; - xfer += iprot->readListBegin(_etype870, _size867); - this->foreignKeys.resize(_size867); - uint32_t _i871; - for (_i871 = 0; _i871 < _size867; ++_i871) + uint32_t _size879; + ::apache::thrift::protocol::TType _etype882; + xfer += iprot->readListBegin(_etype882, _size879); + this->foreignKeys.resize(_size879); + uint32_t _i883; + for (_i883 = 0; _i883 < _size879; ++_i883) { - xfer += this->foreignKeys[_i871].read(iprot); + xfer += this->foreignKeys[_i883].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,10 +4578,10 @@ 
uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter872; - for (_iter872 = this->primaryKeys.begin(); _iter872 != this->primaryKeys.end(); ++_iter872) + std::vector ::const_iterator _iter884; + for (_iter884 = this->primaryKeys.begin(); _iter884 != this->primaryKeys.end(); ++_iter884) { - xfer += (*_iter872).write(oprot); + xfer += (*_iter884).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter873; - for (_iter873 = this->foreignKeys.begin(); _iter873 != this->foreignKeys.end(); ++_iter873) + std::vector ::const_iterator _iter885; + for (_iter885 = this->foreignKeys.begin(); _iter885 != this->foreignKeys.end(); ++_iter885) { - xfer += (*_iter873).write(oprot); + xfer += (*_iter885).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter874; - for (_iter874 = (*(this->primaryKeys)).begin(); _iter874 != (*(this->primaryKeys)).end(); ++_iter874) + std::vector ::const_iterator _iter886; + for (_iter886 = (*(this->primaryKeys)).begin(); _iter886 != (*(this->primaryKeys)).end(); ++_iter886) { - xfer += (*_iter874).write(oprot); + xfer += (*_iter886).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter875; - for (_iter875 = (*(this->foreignKeys)).begin(); _iter875 != (*(this->foreignKeys)).end(); ++_iter875) + std::vector ::const_iterator _iter887; + for (_iter887 = (*(this->foreignKeys)).begin(); _iter887 != (*(this->foreignKeys)).end(); ++_iter887) { - xfer += (*_iter875).write(oprot); + xfer += (*_iter887).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6055,14 +6055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size876; - ::apache::thrift::protocol::TType _etype879; - xfer += iprot->readListBegin(_etype879, _size876); - this->success.resize(_size876); - uint32_t _i880; - for (_i880 = 0; _i880 < _size876; ++_i880) + uint32_t _size888; + ::apache::thrift::protocol::TType _etype891; + xfer += iprot->readListBegin(_etype891, _size888); + this->success.resize(_size888); + uint32_t _i892; + for (_i892 = 0; _i892 < _size888; ++_i892) { - xfer += iprot->readString(this->success[_i880]); + xfer += iprot->readString(this->success[_i892]); } xfer += iprot->readListEnd(); } @@ -6101,10 +6101,10 @@ uint32_t 
ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter881; - for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881) + std::vector ::const_iterator _iter893; + for (_iter893 = this->success.begin(); _iter893 != this->success.end(); ++_iter893) { - xfer += oprot->writeString((*_iter881)); + xfer += oprot->writeString((*_iter893)); } xfer += oprot->writeListEnd(); } @@ -6149,14 +6149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size882; - ::apache::thrift::protocol::TType _etype885; - xfer += iprot->readListBegin(_etype885, _size882); - (*(this->success)).resize(_size882); - uint32_t _i886; - for (_i886 = 0; _i886 < _size882; ++_i886) + uint32_t _size894; + ::apache::thrift::protocol::TType _etype897; + xfer += iprot->readListBegin(_etype897, _size894); + (*(this->success)).resize(_size894); + uint32_t _i898; + for (_i898 = 0; _i898 < _size894; ++_i898) { - xfer += iprot->readString((*(this->success))[_i886]); + xfer += iprot->readString((*(this->success))[_i898]); } xfer += iprot->readListEnd(); } @@ -6231,14 +6231,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size887; - ::apache::thrift::protocol::TType _etype890; - xfer += iprot->readListBegin(_etype890, _size887); - this->tbl_types.resize(_size887); - uint32_t _i891; - for (_i891 = 0; _i891 < _size887; ++_i891) + uint32_t _size899; + ::apache::thrift::protocol::TType _etype902; + xfer += iprot->readListBegin(_etype902, _size899); + this->tbl_types.resize(_size899); + uint32_t _i903; + for (_i903 = 0; _i903 < _size899; ++_i903) { - xfer += iprot->readString(this->tbl_types[_i891]); + xfer += iprot->readString(this->tbl_types[_i903]); } xfer += iprot->readListEnd(); } @@ -6275,10 +6275,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter892; - for (_iter892 = this->tbl_types.begin(); _iter892 != this->tbl_types.end(); ++_iter892) + std::vector ::const_iterator _iter904; + for (_iter904 = this->tbl_types.begin(); _iter904 != this->tbl_types.end(); ++_iter904) { - xfer += oprot->writeString((*_iter892)); + xfer += oprot->writeString((*_iter904)); } xfer += oprot->writeListEnd(); } @@ -6310,10 +6310,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter893; - for (_iter893 = (*(this->tbl_types)).begin(); _iter893 != (*(this->tbl_types)).end(); ++_iter893) + std::vector ::const_iterator _iter905; + for (_iter905 = (*(this->tbl_types)).begin(); _iter905 != (*(this->tbl_types)).end(); ++_iter905) { - xfer += oprot->writeString((*_iter893)); + xfer += oprot->writeString((*_iter905)); } xfer += 
oprot->writeListEnd(); } @@ -6354,14 +6354,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size894; - ::apache::thrift::protocol::TType _etype897; - xfer += iprot->readListBegin(_etype897, _size894); - this->success.resize(_size894); - uint32_t _i898; - for (_i898 = 0; _i898 < _size894; ++_i898) + uint32_t _size906; + ::apache::thrift::protocol::TType _etype909; + xfer += iprot->readListBegin(_etype909, _size906); + this->success.resize(_size906); + uint32_t _i910; + for (_i910 = 0; _i910 < _size906; ++_i910) { - xfer += this->success[_i898].read(iprot); + xfer += this->success[_i910].read(iprot); } xfer += iprot->readListEnd(); } @@ -6400,10 +6400,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter899; - for (_iter899 = this->success.begin(); _iter899 != this->success.end(); ++_iter899) + std::vector ::const_iterator _iter911; + for (_iter911 = this->success.begin(); _iter911 != this->success.end(); ++_iter911) { - xfer += (*_iter899).write(oprot); + xfer += (*_iter911).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6448,14 +6448,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size900; - ::apache::thrift::protocol::TType _etype903; - xfer += iprot->readListBegin(_etype903, _size900); - (*(this->success)).resize(_size900); - uint32_t _i904; - for (_i904 = 0; _i904 < _size900; ++_i904) + uint32_t _size912; + ::apache::thrift::protocol::TType _etype915; + xfer += iprot->readListBegin(_etype915, _size912); + (*(this->success)).resize(_size912); + uint32_t _i916; + for (_i916 = 0; _i916 < _size912; ++_i916) { - xfer += (*(this->success))[_i904].read(iprot); + xfer += (*(this->success))[_i916].read(iprot); } xfer += iprot->readListEnd(); } @@ -6593,14 +6593,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size905; - ::apache::thrift::protocol::TType _etype908; - xfer += iprot->readListBegin(_etype908, _size905); - this->success.resize(_size905); - uint32_t _i909; - for (_i909 = 0; _i909 < _size905; ++_i909) + uint32_t _size917; + ::apache::thrift::protocol::TType _etype920; + xfer += iprot->readListBegin(_etype920, _size917); + this->success.resize(_size917); + uint32_t _i921; + for (_i921 = 0; _i921 < _size917; ++_i921) { - xfer += iprot->readString(this->success[_i909]); + xfer += iprot->readString(this->success[_i921]); } xfer += iprot->readListEnd(); } @@ -6639,10 +6639,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter910; - for (_iter910 = this->success.begin(); _iter910 != this->success.end(); ++_iter910) + std::vector ::const_iterator _iter922; + for (_iter922 = this->success.begin(); _iter922 != this->success.end(); ++_iter922) { - xfer += oprot->writeString((*_iter910)); + xfer += 
oprot->writeString((*_iter922)); } xfer += oprot->writeListEnd(); } @@ -6687,14 +6687,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size911; - ::apache::thrift::protocol::TType _etype914; - xfer += iprot->readListBegin(_etype914, _size911); - (*(this->success)).resize(_size911); - uint32_t _i915; - for (_i915 = 0; _i915 < _size911; ++_i915) + uint32_t _size923; + ::apache::thrift::protocol::TType _etype926; + xfer += iprot->readListBegin(_etype926, _size923); + (*(this->success)).resize(_size923); + uint32_t _i927; + for (_i927 = 0; _i927 < _size923; ++_i927) { - xfer += iprot->readString((*(this->success))[_i915]); + xfer += iprot->readString((*(this->success))[_i927]); } xfer += iprot->readListEnd(); } @@ -7004,14 +7004,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size916; - ::apache::thrift::protocol::TType _etype919; - xfer += iprot->readListBegin(_etype919, _size916); - this->tbl_names.resize(_size916); - uint32_t _i920; - for (_i920 = 0; _i920 < _size916; ++_i920) + uint32_t _size928; + ::apache::thrift::protocol::TType _etype931; + xfer += iprot->readListBegin(_etype931, _size928); + this->tbl_names.resize(_size928); + uint32_t _i932; + for (_i932 = 0; _i932 < _size928; ++_i932) { - xfer += iprot->readString(this->tbl_names[_i920]); + xfer += iprot->readString(this->tbl_names[_i932]); } xfer += iprot->readListEnd(); } @@ -7044,10 +7044,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter921; - for (_iter921 = this->tbl_names.begin(); _iter921 != this->tbl_names.end(); ++_iter921) + std::vector ::const_iterator _iter933; + for (_iter933 = this->tbl_names.begin(); _iter933 != this->tbl_names.end(); ++_iter933) { - xfer += oprot->writeString((*_iter921)); + xfer += oprot->writeString((*_iter933)); } xfer += oprot->writeListEnd(); } @@ -7075,10 +7075,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter922; - for (_iter922 = (*(this->tbl_names)).begin(); _iter922 != (*(this->tbl_names)).end(); ++_iter922) + std::vector ::const_iterator _iter934; + for (_iter934 = (*(this->tbl_names)).begin(); _iter934 != (*(this->tbl_names)).end(); ++_iter934) { - xfer += oprot->writeString((*_iter922)); + xfer += oprot->writeString((*_iter934)); } xfer += oprot->writeListEnd(); } @@ -7119,14 +7119,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size923; - ::apache::thrift::protocol::TType _etype926; - xfer += iprot->readListBegin(_etype926, _size923); - this->success.resize(_size923); - uint32_t _i927; - for (_i927 = 0; _i927 < _size923; ++_i927) + uint32_t _size935; + ::apache::thrift::protocol::TType _etype938; + xfer += iprot->readListBegin(_etype938, _size935); + 
this->success.resize(_size935); + uint32_t _i939; + for (_i939 = 0; _i939 < _size935; ++_i939) { - xfer += this->success[_i927].read(iprot); + xfer += this->success[_i939].read(iprot); } xfer += iprot->readListEnd(); } @@ -7181,10 +7181,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter928; - for (_iter928 = this->success.begin(); _iter928 != this->success.end(); ++_iter928) + std::vector
::const_iterator _iter940;
+      for (_iter940 = this->success.begin(); _iter940 != this->success.end(); ++_iter940)
       {
-        xfer += (*_iter928).write(oprot);
+        xfer += (*_iter940).write(oprot);
       }
       xfer += oprot->writeListEnd();
     }
@@ -7237,14 +7237,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size929;
-            ::apache::thrift::protocol::TType _etype932;
-            xfer += iprot->readListBegin(_etype932, _size929);
-            (*(this->success)).resize(_size929);
-            uint32_t _i933;
-            for (_i933 = 0; _i933 < _size929; ++_i933)
+            uint32_t _size941;
+            ::apache::thrift::protocol::TType _etype944;
+            xfer += iprot->readListBegin(_etype944, _size941);
+            (*(this->success)).resize(_size941);
+            uint32_t _i945;
+            for (_i945 = 0; _i945 < _size941; ++_i945)
             {
-              xfer += (*(this->success))[_i933].read(iprot);
+              xfer += (*(this->success))[_i945].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -7430,14 +7430,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size934;
-            ::apache::thrift::protocol::TType _etype937;
-            xfer += iprot->readListBegin(_etype937, _size934);
-            this->success.resize(_size934);
-            uint32_t _i938;
-            for (_i938 = 0; _i938 < _size934; ++_i938)
+            uint32_t _size946;
+            ::apache::thrift::protocol::TType _etype949;
+            xfer += iprot->readListBegin(_etype949, _size946);
+            this->success.resize(_size946);
+            uint32_t _i950;
+            for (_i950 = 0; _i950 < _size946; ++_i950)
             {
-              xfer += iprot->readString(this->success[_i938]);
+              xfer += iprot->readString(this->success[_i950]);
             }
             xfer += iprot->readListEnd();
           }
@@ -7492,10 +7492,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter939;
-      for (_iter939 = this->success.begin(); _iter939 != this->success.end(); ++_iter939)
+      std::vector<std::string> ::const_iterator _iter951;
+      for (_iter951 = this->success.begin(); _iter951 != this->success.end(); ++_iter951)
       {
-        xfer += oprot->writeString((*_iter939));
+        xfer += oprot->writeString((*_iter951));
       }
       xfer += oprot->writeListEnd();
     }
@@ -7548,14 +7548,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size940;
-            ::apache::thrift::protocol::TType _etype943;
-            xfer += iprot->readListBegin(_etype943, _size940);
-            (*(this->success)).resize(_size940);
-            uint32_t _i944;
-            for (_i944 = 0; _i944 < _size940; ++_i944)
+            uint32_t _size952;
+            ::apache::thrift::protocol::TType _etype955;
+            xfer += iprot->readListBegin(_etype955, _size952);
+            (*(this->success)).resize(_size952);
+            uint32_t _i956;
+            for (_i956 = 0; _i956 < _size952; ++_i956)
             {
-              xfer += iprot->readString((*(this->success))[_i944]);
+              xfer += iprot->readString((*(this->success))[_i956]);
             }
             xfer += iprot->readListEnd();
           }
@@ -8889,14 +8889,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size945;
-            ::apache::thrift::protocol::TType _etype948;
-            xfer += iprot->readListBegin(_etype948, _size945);
-            this->new_parts.resize(_size945);
-            uint32_t _i949;
-            for (_i949 = 0; _i949 < _size945; ++_i949)
+            uint32_t _size957;
+            ::apache::thrift::protocol::TType _etype960;
+            xfer += iprot->readListBegin(_etype960, _size957);
+            this->new_parts.resize(_size957);
+            uint32_t _i961;
+            for (_i961 = 0; _i961 < _size957; ++_i961)
             {
-              xfer += this->new_parts[_i949].read(iprot);
+              xfer += this->new_parts[_i961].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -8925,10 +8925,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<Partition> ::const_iterator _iter950;
-    for (_iter950 = this->new_parts.begin(); _iter950 != this->new_parts.end(); ++_iter950)
+    std::vector<Partition> ::const_iterator _iter962;
+    for (_iter962 = this->new_parts.begin(); _iter962 != this->new_parts.end(); ++_iter962)
     {
-      xfer += (*_iter950).write(oprot);
+      xfer += (*_iter962).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -8952,10 +8952,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<Partition> ::const_iterator _iter951;
-    for (_iter951 = (*(this->new_parts)).begin(); _iter951 != (*(this->new_parts)).end(); ++_iter951)
+    std::vector<Partition> ::const_iterator _iter963;
+    for (_iter963 = (*(this->new_parts)).begin(); _iter963 != (*(this->new_parts)).end(); ++_iter963)
     {
-      xfer += (*_iter951).write(oprot);
+      xfer += (*_iter963).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9164,14 +9164,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->new_parts.clear();
-            uint32_t _size952;
-            ::apache::thrift::protocol::TType _etype955;
-            xfer += iprot->readListBegin(_etype955, _size952);
-            this->new_parts.resize(_size952);
-            uint32_t _i956;
-            for (_i956 = 0; _i956 < _size952; ++_i956)
+            uint32_t _size964;
+            ::apache::thrift::protocol::TType _etype967;
+            xfer += iprot->readListBegin(_etype967, _size964);
+            this->new_parts.resize(_size964);
+            uint32_t _i968;
+            for (_i968 = 0; _i968 < _size964; ++_i968)
             {
-              xfer += this->new_parts[_i956].read(iprot);
+              xfer += this->new_parts[_i968].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -9200,10 +9200,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->new_parts.size()));
-    std::vector<PartitionSpec> ::const_iterator _iter957;
-    for (_iter957 = this->new_parts.begin(); _iter957 != this->new_parts.end(); ++_iter957)
+    std::vector<PartitionSpec> ::const_iterator _iter969;
+    for (_iter969 = this->new_parts.begin(); _iter969 != this->new_parts.end(); ++_iter969)
     {
-      xfer += (*_iter957).write(oprot);
+      xfer += (*_iter969).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9227,10 +9227,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift:
   xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>((*(this->new_parts)).size()));
-    std::vector<PartitionSpec> ::const_iterator _iter958;
-    for (_iter958 = (*(this->new_parts)).begin(); _iter958 != (*(this->new_parts)).end(); ++_iter958)
+    std::vector<PartitionSpec> ::const_iterator _iter970;
+    for (_iter970 = (*(this->new_parts)).begin(); _iter970 != (*(this->new_parts)).end(); ++_iter970)
     {
-      xfer += (*_iter958).write(oprot);
+      xfer += (*_iter970).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -9455,14 +9455,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size959;
-            ::apache::thrift::protocol::TType _etype962;
-            xfer += iprot->readListBegin(_etype962, _size959);
-            this->part_vals.resize(_size959);
-            uint32_t _i963;
-            for (_i963 = 0; _i963 < _size959; ++_i963)
+            uint32_t _size971;
+            ::apache::thrift::protocol::TType _etype974;
+            xfer += iprot->readListBegin(_etype974, _size971);
+            this->part_vals.resize(_size971);
+            uint32_t _i975;
+            for (_i975 = 0; _i975 < _size971; ++_i975)
             {
-              xfer += iprot->readString(this->part_vals[_i963]);
+              xfer += iprot->readString(this->part_vals[_i975]);
             }
             xfer += iprot->readListEnd();
           }
@@ -9499,10 +9499,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter964;
-    for (_iter964 = this->part_vals.begin(); _iter964 != this->part_vals.end(); ++_iter964)
+    std::vector<std::string> ::const_iterator _iter976;
+    for (_iter976 = this->part_vals.begin(); _iter976 != this->part_vals.end(); ++_iter976)
    {
-      xfer += oprot->writeString((*_iter964));
+      xfer += oprot->writeString((*_iter976));
    }
    xfer += oprot->writeListEnd();
  }
@@ -9534,10 +9534,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter965;
-    for (_iter965 = (*(this->part_vals)).begin(); _iter965 != (*(this->part_vals)).end(); ++_iter965)
+    std::vector<std::string> ::const_iterator _iter977;
+    for (_iter977 = (*(this->part_vals)).begin(); _iter977 != (*(this->part_vals)).end(); ++_iter977)
     {
-      xfer += oprot->writeString((*_iter965));
+      xfer += oprot->writeString((*_iter977));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10009,14 +10009,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size966;
-            ::apache::thrift::protocol::TType _etype969;
-            xfer += iprot->readListBegin(_etype969, _size966);
-            this->part_vals.resize(_size966);
-            uint32_t _i970;
-            for (_i970 = 0; _i970 < _size966; ++_i970)
+            uint32_t _size978;
+            ::apache::thrift::protocol::TType _etype981;
+            xfer += iprot->readListBegin(_etype981, _size978);
+            this->part_vals.resize(_size978);
+            uint32_t _i982;
+            for (_i982 = 0; _i982 < _size978; ++_i982)
             {
-              xfer += iprot->readString(this->part_vals[_i970]);
+              xfer += iprot->readString(this->part_vals[_i982]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10061,10 +10061,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter971;
-    for (_iter971 = this->part_vals.begin(); _iter971 != this->part_vals.end(); ++_iter971)
+    std::vector<std::string> ::const_iterator _iter983;
+    for (_iter983 = this->part_vals.begin(); _iter983 != this->part_vals.end(); ++_iter983)
     {
-      xfer += oprot->writeString((*_iter971));
+      xfer += oprot->writeString((*_iter983));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10100,10 +10100,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter972;
-    for (_iter972 = (*(this->part_vals)).begin(); _iter972 != (*(this->part_vals)).end(); ++_iter972)
+    std::vector<std::string> ::const_iterator _iter984;
+    for (_iter984 = (*(this->part_vals)).begin(); _iter984 != (*(this->part_vals)).end(); ++_iter984)
     {
-      xfer += oprot->writeString((*_iter972));
+      xfer += oprot->writeString((*_iter984));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10906,14 +10906,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size973;
-            ::apache::thrift::protocol::TType _etype976;
-            xfer += iprot->readListBegin(_etype976, _size973);
-            this->part_vals.resize(_size973);
-            uint32_t _i977;
-            for (_i977 = 0; _i977 < _size973; ++_i977)
+            uint32_t _size985;
+            ::apache::thrift::protocol::TType _etype988;
+            xfer += iprot->readListBegin(_etype988, _size985);
+            this->part_vals.resize(_size985);
+            uint32_t _i989;
+            for (_i989 = 0; _i989 < _size985; ++_i989)
             {
-              xfer += iprot->readString(this->part_vals[_i977]);
+              xfer += iprot->readString(this->part_vals[_i989]);
             }
             xfer += iprot->readListEnd();
           }
@@ -10958,10 +10958,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter978;
-    for (_iter978 = this->part_vals.begin(); _iter978 != this->part_vals.end(); ++_iter978)
+    std::vector<std::string> ::const_iterator _iter990;
+    for (_iter990 = this->part_vals.begin(); _iter990 != this->part_vals.end(); ++_iter990)
     {
-      xfer += oprot->writeString((*_iter978));
+      xfer += oprot->writeString((*_iter990));
     }
     xfer += oprot->writeListEnd();
   }
@@ -10997,10 +10997,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter979;
-    for (_iter979 = (*(this->part_vals)).begin(); _iter979 != (*(this->part_vals)).end(); ++_iter979)
+    std::vector<std::string> ::const_iterator _iter991;
+    for (_iter991 = (*(this->part_vals)).begin(); _iter991 != (*(this->part_vals)).end(); ++_iter991)
     {
-      xfer += oprot->writeString((*_iter979));
+      xfer += oprot->writeString((*_iter991));
     }
     xfer += oprot->writeListEnd();
   }
@@ -11209,14 +11209,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read(
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size980;
-            ::apache::thrift::protocol::TType _etype983;
-            xfer += iprot->readListBegin(_etype983, _size980);
-            this->part_vals.resize(_size980);
-            uint32_t _i984;
-            for (_i984 = 0; _i984 < _size980; ++_i984)
+            uint32_t _size992;
+            ::apache::thrift::protocol::TType _etype995;
+            xfer += iprot->readListBegin(_etype995, _size992);
+            this->part_vals.resize(_size992);
+            uint32_t _i996;
+            for (_i996 = 0; _i996 < _size992; ++_i996)
             {
-              xfer += iprot->readString(this->part_vals[_i984]);
+              xfer += iprot->readString(this->part_vals[_i996]);
             }
             xfer += iprot->readListEnd();
           }
@@ -11269,10 +11269,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter985;
-    for (_iter985 = this->part_vals.begin(); _iter985 != this->part_vals.end(); ++_iter985)
+    std::vector<std::string> ::const_iterator _iter997;
+    for (_iter997 = this->part_vals.begin(); _iter997 != this->part_vals.end(); ++_iter997)
     {
-      xfer += oprot->writeString((*_iter985));
+      xfer += oprot->writeString((*_iter997));
    }
    xfer += oprot->writeListEnd();
  }
@@ -11312,10 +11312,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter986;
-    for (_iter986 = (*(this->part_vals)).begin(); _iter986 != (*(this->part_vals)).end(); ++_iter986)
+    std::vector<std::string> ::const_iterator _iter998;
+    for (_iter998 = (*(this->part_vals)).begin(); _iter998 != (*(this->part_vals)).end(); ++_iter998)
     {
-      xfer += oprot->writeString((*_iter986));
+      xfer += oprot->writeString((*_iter998));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12321,14 +12321,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size987;
-            ::apache::thrift::protocol::TType _etype990;
-            xfer += iprot->readListBegin(_etype990, _size987);
-            this->part_vals.resize(_size987);
-            uint32_t _i991;
-            for (_i991 = 0; _i991 < _size987; ++_i991)
+            uint32_t _size999;
+            ::apache::thrift::protocol::TType _etype1002;
+            xfer += iprot->readListBegin(_etype1002, _size999);
+            this->part_vals.resize(_size999);
+            uint32_t _i1003;
+            for (_i1003 = 0; _i1003 < _size999; ++_i1003)
             {
-              xfer += iprot->readString(this->part_vals[_i991]);
+              xfer += iprot->readString(this->part_vals[_i1003]);
             }
             xfer += iprot->readListEnd();
           }
@@ -12365,10 +12365,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter992;
-    for (_iter992 = this->part_vals.begin(); _iter992 != this->part_vals.end(); ++_iter992)
+    std::vector<std::string> ::const_iterator _iter1004;
+    for (_iter1004 = this->part_vals.begin(); _iter1004 != this->part_vals.end(); ++_iter1004)
     {
-      xfer += oprot->writeString((*_iter992));
+      xfer += oprot->writeString((*_iter1004));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12400,10 +12400,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter993;
-    for (_iter993 = (*(this->part_vals)).begin(); _iter993 != (*(this->part_vals)).end(); ++_iter993)
+    std::vector<std::string> ::const_iterator _iter1005;
+    for (_iter1005 = (*(this->part_vals)).begin(); _iter1005 != (*(this->part_vals)).end(); ++_iter1005)
     {
-      xfer += oprot->writeString((*_iter993));
+      xfer += oprot->writeString((*_iter1005));
     }
     xfer += oprot->writeListEnd();
   }
@@ -12592,17 +12592,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size994;
-            ::apache::thrift::protocol::TType _ktype995;
-            ::apache::thrift::protocol::TType _vtype996;
-            xfer += iprot->readMapBegin(_ktype995, _vtype996, _size994);
-            uint32_t _i998;
-            for (_i998 = 0; _i998 < _size994; ++_i998)
+            uint32_t _size1006;
+            ::apache::thrift::protocol::TType _ktype1007;
+            ::apache::thrift::protocol::TType _vtype1008;
+            xfer += iprot->readMapBegin(_ktype1007, _vtype1008, _size1006);
+            uint32_t _i1010;
+            for (_i1010 = 0; _i1010 < _size1006; ++_i1010)
             {
-              std::string _key999;
-              xfer += iprot->readString(_key999);
-              std::string& _val1000 = this->partitionSpecs[_key999];
-              xfer += iprot->readString(_val1000);
+              std::string _key1011;
+              xfer += iprot->readString(_key1011);
+              std::string& _val1012 = this->partitionSpecs[_key1011];
+              xfer += iprot->readString(_val1012);
             }
             xfer += iprot->readMapEnd();
           }
@@ -12663,11 +12663,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1001;
-    for (_iter1001 = this->partitionSpecs.begin(); _iter1001 != this->partitionSpecs.end(); ++_iter1001)
+    std::map<std::string, std::string> ::const_iterator _iter1013;
+    for (_iter1013 = this->partitionSpecs.begin(); _iter1013 != this->partitionSpecs.end(); ++_iter1013)
     {
-      xfer += oprot->writeString(_iter1001->first);
-      xfer += oprot->writeString(_iter1001->second);
+      xfer += oprot->writeString(_iter1013->first);
+      xfer += oprot->writeString(_iter1013->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -12707,11 +12707,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1002;
-    for (_iter1002 = (*(this->partitionSpecs)).begin(); _iter1002 != (*(this->partitionSpecs)).end(); ++_iter1002)
+    std::map<std::string, std::string> ::const_iterator _iter1014;
+    for (_iter1014 = (*(this->partitionSpecs)).begin(); _iter1014 != (*(this->partitionSpecs)).end(); ++_iter1014)
     {
-      xfer += oprot->writeString(_iter1002->first);
-      xfer += oprot->writeString(_iter1002->second);
+      xfer += oprot->writeString(_iter1014->first);
+      xfer += oprot->writeString(_iter1014->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -12956,17 +12956,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_MAP) {
           {
             this->partitionSpecs.clear();
-            uint32_t _size1003;
-            ::apache::thrift::protocol::TType _ktype1004;
-            ::apache::thrift::protocol::TType _vtype1005;
-            xfer += iprot->readMapBegin(_ktype1004, _vtype1005, _size1003);
-            uint32_t _i1007;
-            for (_i1007 = 0; _i1007 < _size1003; ++_i1007)
+            uint32_t _size1015;
+            ::apache::thrift::protocol::TType _ktype1016;
+            ::apache::thrift::protocol::TType _vtype1017;
+            xfer += iprot->readMapBegin(_ktype1016, _vtype1017, _size1015);
+            uint32_t _i1019;
+            for (_i1019 = 0; _i1019 < _size1015; ++_i1019)
             {
-              std::string _key1008;
-              xfer += iprot->readString(_key1008);
-              std::string& _val1009 = this->partitionSpecs[_key1008];
-              xfer += iprot->readString(_val1009);
+              std::string _key1020;
+              xfer += iprot->readString(_key1020);
+              std::string& _val1021 = this->partitionSpecs[_key1020];
+              xfer += iprot->readString(_val1021);
             }
             xfer += iprot->readMapEnd();
           }
@@ -13027,11 +13027,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionSpecs.size()));
-    std::map<std::string, std::string> ::const_iterator _iter1010;
-    for (_iter1010 = this->partitionSpecs.begin(); _iter1010 != this->partitionSpecs.end(); ++_iter1010)
+    std::map<std::string, std::string> ::const_iterator _iter1022;
+    for (_iter1022 = this->partitionSpecs.begin(); _iter1022 != this->partitionSpecs.end(); ++_iter1022)
     {
-      xfer += oprot->writeString(_iter1010->first);
-      xfer += oprot->writeString(_iter1010->second);
+      xfer += oprot->writeString(_iter1022->first);
+      xfer += oprot->writeString(_iter1022->second);
     }
     xfer += oprot->writeMapEnd();
   }
@@ -13071,11 +13071,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift::
   xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1);
   {
     xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->partitionSpecs)).size()));
-    std::map<std::string, std::string> ::const_iterator _iter1011;
-    for (_iter1011 = (*(this->partitionSpecs)).begin(); _iter1011 != (*(this->partitionSpecs)).end(); ++_iter1011)
+    std::map<std::string, std::string> ::const_iterator _iter1023;
+    for (_iter1023 = (*(this->partitionSpecs)).begin(); _iter1023 != (*(this->partitionSpecs)).end(); ++_iter1023)
    {
-      xfer += oprot->writeString(_iter1011->first);
-      xfer += oprot->writeString(_iter1011->second);
+      xfer += oprot->writeString(_iter1023->first);
+      xfer += oprot->writeString(_iter1023->second);
    }
    xfer += oprot->writeMapEnd();
  }
@@ -13132,14 +13132,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1012;
-            ::apache::thrift::protocol::TType _etype1015;
-            xfer += iprot->readListBegin(_etype1015, _size1012);
-            this->success.resize(_size1012);
-            uint32_t _i1016;
-            for (_i1016 = 0; _i1016 < _size1012; ++_i1016)
+            uint32_t _size1024;
+            ::apache::thrift::protocol::TType _etype1027;
+            xfer += iprot->readListBegin(_etype1027, _size1024);
+            this->success.resize(_size1024);
+            uint32_t _i1028;
+            for (_i1028 = 0; _i1028 < _size1024; ++_i1028)
             {
-              xfer += this->success[_i1016].read(iprot);
+              xfer += this->success[_i1028].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13202,10 +13202,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1017;
-      for (_iter1017 = this->success.begin(); _iter1017 != this->success.end(); ++_iter1017)
+      std::vector<Partition> ::const_iterator _iter1029;
+      for (_iter1029 = this->success.begin(); _iter1029 != this->success.end(); ++_iter1029)
       {
-        xfer += (*_iter1017).write(oprot);
+        xfer += (*_iter1029).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -13262,14 +13262,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1018;
-            ::apache::thrift::protocol::TType _etype1021;
-            xfer += iprot->readListBegin(_etype1021, _size1018);
-            (*(this->success)).resize(_size1018);
-            uint32_t _i1022;
-            for (_i1022 = 0; _i1022 < _size1018; ++_i1022)
+            uint32_t _size1030;
+            ::apache::thrift::protocol::TType _etype1033;
+            xfer += iprot->readListBegin(_etype1033, _size1030);
+            (*(this->success)).resize(_size1030);
+            uint32_t _i1034;
+            for (_i1034 = 0; _i1034 < _size1030; ++_i1034)
             {
-              xfer += (*(this->success))[_i1022].read(iprot);
+              xfer += (*(this->success))[_i1034].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -13368,14 +13368,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1023;
-            ::apache::thrift::protocol::TType _etype1026;
-            xfer += iprot->readListBegin(_etype1026, _size1023);
-            this->part_vals.resize(_size1023);
-            uint32_t _i1027;
-            for (_i1027 = 0; _i1027 < _size1023; ++_i1027)
+            uint32_t _size1035;
+            ::apache::thrift::protocol::TType _etype1038;
+            xfer += iprot->readListBegin(_etype1038, _size1035);
+            this->part_vals.resize(_size1035);
+            uint32_t _i1039;
+            for (_i1039 = 0; _i1039 < _size1035; ++_i1039)
             {
-              xfer += iprot->readString(this->part_vals[_i1027]);
+              xfer += iprot->readString(this->part_vals[_i1039]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13396,14 +13396,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1028;
-            ::apache::thrift::protocol::TType _etype1031;
-            xfer += iprot->readListBegin(_etype1031, _size1028);
-            this->group_names.resize(_size1028);
-            uint32_t _i1032;
-            for (_i1032 = 0; _i1032 < _size1028; ++_i1032)
+            uint32_t _size1040;
+            ::apache::thrift::protocol::TType _etype1043;
+            xfer += iprot->readListBegin(_etype1043, _size1040);
+            this->group_names.resize(_size1040);
+            uint32_t _i1044;
+            for (_i1044 = 0; _i1044 < _size1040; ++_i1044)
             {
-              xfer += iprot->readString(this->group_names[_i1032]);
+              xfer += iprot->readString(this->group_names[_i1044]);
             }
             xfer += iprot->readListEnd();
           }
@@ -13440,10 +13440,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1033;
-    for (_iter1033 = this->part_vals.begin(); _iter1033 != this->part_vals.end(); ++_iter1033)
+    std::vector<std::string> ::const_iterator _iter1045;
+    for (_iter1045 = this->part_vals.begin(); _iter1045 != this->part_vals.end(); ++_iter1045)
     {
-      xfer += oprot->writeString((*_iter1033));
+      xfer += oprot->writeString((*_iter1045));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13456,10 +13456,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1034;
-    for (_iter1034 = this->group_names.begin(); _iter1034 != this->group_names.end(); ++_iter1034)
+    std::vector<std::string> ::const_iterator _iter1046;
+    for (_iter1046 = this->group_names.begin(); _iter1046 != this->group_names.end(); ++_iter1046)
     {
-      xfer += oprot->writeString((*_iter1034));
+      xfer += oprot->writeString((*_iter1046));
    }
    xfer += oprot->writeListEnd();
  }
@@ -13491,10 +13491,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1035;
-    for (_iter1035 = (*(this->part_vals)).begin(); _iter1035 != (*(this->part_vals)).end(); ++_iter1035)
+    std::vector<std::string> ::const_iterator _iter1047;
+    for (_iter1047 = (*(this->part_vals)).begin(); _iter1047 != (*(this->part_vals)).end(); ++_iter1047)
     {
-      xfer += oprot->writeString((*_iter1035));
+      xfer += oprot->writeString((*_iter1047));
     }
     xfer += oprot->writeListEnd();
   }
@@ -13507,10 +13507,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1036;
-    for (_iter1036 = (*(this->group_names)).begin(); _iter1036 != (*(this->group_names)).end(); ++_iter1036)
+    std::vector<std::string> ::const_iterator _iter1048;
+    for (_iter1048 = (*(this->group_names)).begin(); _iter1048 != (*(this->group_names)).end(); ++_iter1048)
     {
-      xfer += oprot->writeString((*_iter1036));
+      xfer += oprot->writeString((*_iter1048));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14069,14 +14069,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1037;
-            ::apache::thrift::protocol::TType _etype1040;
-            xfer += iprot->readListBegin(_etype1040, _size1037);
-            this->success.resize(_size1037);
-            uint32_t _i1041;
-            for (_i1041 = 0; _i1041 < _size1037; ++_i1041)
+            uint32_t _size1049;
+            ::apache::thrift::protocol::TType _etype1052;
+            xfer += iprot->readListBegin(_etype1052, _size1049);
+            this->success.resize(_size1049);
+            uint32_t _i1053;
+            for (_i1053 = 0; _i1053 < _size1049; ++_i1053)
             {
-              xfer += this->success[_i1041].read(iprot);
+              xfer += this->success[_i1053].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14123,10 +14123,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1042;
-      for (_iter1042 = this->success.begin(); _iter1042 != this->success.end(); ++_iter1042)
+      std::vector<Partition> ::const_iterator _iter1054;
+      for (_iter1054 = this->success.begin(); _iter1054 != this->success.end(); ++_iter1054)
       {
-        xfer += (*_iter1042).write(oprot);
+        xfer += (*_iter1054).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -14175,14 +14175,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1043;
-            ::apache::thrift::protocol::TType _etype1046;
-            xfer += iprot->readListBegin(_etype1046, _size1043);
-            (*(this->success)).resize(_size1043);
-            uint32_t _i1047;
-            for (_i1047 = 0; _i1047 < _size1043; ++_i1047)
+            uint32_t _size1055;
+            ::apache::thrift::protocol::TType _etype1058;
+            xfer += iprot->readListBegin(_etype1058, _size1055);
+            (*(this->success)).resize(_size1055);
+            uint32_t _i1059;
+            for (_i1059 = 0; _i1059 < _size1055; ++_i1059)
             {
-              xfer += (*(this->success))[_i1047].read(iprot);
+              xfer += (*(this->success))[_i1059].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14281,14 +14281,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1048;
-            ::apache::thrift::protocol::TType _etype1051;
-            xfer += iprot->readListBegin(_etype1051, _size1048);
-            this->group_names.resize(_size1048);
-            uint32_t _i1052;
-            for (_i1052 = 0; _i1052 < _size1048; ++_i1052)
+            uint32_t _size1060;
+            ::apache::thrift::protocol::TType _etype1063;
+            xfer += iprot->readListBegin(_etype1063, _size1060);
+            this->group_names.resize(_size1060);
+            uint32_t _i1064;
+            for (_i1064 = 0; _i1064 < _size1060; ++_i1064)
             {
-              xfer += iprot->readString(this->group_names[_i1052]);
+              xfer += iprot->readString(this->group_names[_i1064]);
             }
             xfer += iprot->readListEnd();
           }
@@ -14333,10 +14333,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1053;
-    for (_iter1053 = this->group_names.begin(); _iter1053 != this->group_names.end(); ++_iter1053)
+    std::vector<std::string> ::const_iterator _iter1065;
+    for (_iter1065 = this->group_names.begin(); _iter1065 != this->group_names.end(); ++_iter1065)
     {
-      xfer += oprot->writeString((*_iter1053));
+      xfer += oprot->writeString((*_iter1065));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14376,10 +14376,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1054;
-    for (_iter1054 = (*(this->group_names)).begin(); _iter1054 != (*(this->group_names)).end(); ++_iter1054)
+    std::vector<std::string> ::const_iterator _iter1066;
+    for (_iter1066 = (*(this->group_names)).begin(); _iter1066 != (*(this->group_names)).end(); ++_iter1066)
     {
-      xfer += oprot->writeString((*_iter1054));
+      xfer += oprot->writeString((*_iter1066));
     }
     xfer += oprot->writeListEnd();
   }
@@ -14420,14 +14420,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1055;
-            ::apache::thrift::protocol::TType _etype1058;
-            xfer += iprot->readListBegin(_etype1058, _size1055);
-            this->success.resize(_size1055);
-            uint32_t _i1059;
-            for (_i1059 = 0; _i1059 < _size1055; ++_i1059)
+            uint32_t _size1067;
+            ::apache::thrift::protocol::TType _etype1070;
+            xfer += iprot->readListBegin(_etype1070, _size1067);
+            this->success.resize(_size1067);
+            uint32_t _i1071;
+            for (_i1071 = 0; _i1071 < _size1067; ++_i1071)
             {
-              xfer += this->success[_i1059].read(iprot);
+              xfer += this->success[_i1071].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14474,10 +14474,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1060;
-      for (_iter1060 = this->success.begin(); _iter1060 != this->success.end(); ++_iter1060)
+      std::vector<Partition> ::const_iterator _iter1072;
+      for (_iter1072 = this->success.begin(); _iter1072 != this->success.end(); ++_iter1072)
       {
-        xfer += (*_iter1060).write(oprot);
+        xfer += (*_iter1072).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -14526,14 +14526,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1061;
-            ::apache::thrift::protocol::TType _etype1064;
-            xfer += iprot->readListBegin(_etype1064, _size1061);
-            (*(this->success)).resize(_size1061);
-            uint32_t _i1065;
-            for (_i1065 = 0; _i1065 < _size1061; ++_i1065)
+            uint32_t _size1073;
+            ::apache::thrift::protocol::TType _etype1076;
+            xfer += iprot->readListBegin(_etype1076, _size1073);
+            (*(this->success)).resize(_size1073);
+            uint32_t _i1077;
+            for (_i1077 = 0; _i1077 < _size1073; ++_i1077)
             {
-              xfer += (*(this->success))[_i1065].read(iprot);
+              xfer += (*(this->success))[_i1077].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14711,14 +14711,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1066;
-            ::apache::thrift::protocol::TType _etype1069;
-            xfer += iprot->readListBegin(_etype1069, _size1066);
-            this->success.resize(_size1066);
-            uint32_t _i1070;
-            for (_i1070 = 0; _i1070 < _size1066; ++_i1070)
+            uint32_t _size1078;
+            ::apache::thrift::protocol::TType _etype1081;
+            xfer += iprot->readListBegin(_etype1081, _size1078);
+            this->success.resize(_size1078);
+            uint32_t _i1082;
+            for (_i1082 = 0; _i1082 < _size1078; ++_i1082)
             {
-              xfer += this->success[_i1070].read(iprot);
+              xfer += this->success[_i1082].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -14765,10 +14765,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
    {
      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<PartitionSpec> ::const_iterator _iter1071;
-      for (_iter1071 = this->success.begin(); _iter1071 != this->success.end(); ++_iter1071)
+      std::vector<PartitionSpec> ::const_iterator _iter1083;
+      for (_iter1083 = this->success.begin(); _iter1083 != this->success.end(); ++_iter1083)
      {
-        xfer += (*_iter1071).write(oprot);
+        xfer += (*_iter1083).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -14817,14 +14817,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1072;
-            ::apache::thrift::protocol::TType _etype1075;
-            xfer += iprot->readListBegin(_etype1075, _size1072);
-            (*(this->success)).resize(_size1072);
-            uint32_t _i1076;
-            for (_i1076 = 0; _i1076 < _size1072; ++_i1076)
+            uint32_t _size1084;
+            ::apache::thrift::protocol::TType _etype1087;
+            xfer += iprot->readListBegin(_etype1087, _size1084);
+            (*(this->success)).resize(_size1084);
+            uint32_t _i1088;
+            for (_i1088 = 0; _i1088 < _size1084; ++_i1088)
             {
-              xfer += (*(this->success))[_i1076].read(iprot);
+              xfer += (*(this->success))[_i1088].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -15002,14 +15002,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1077;
-            ::apache::thrift::protocol::TType _etype1080;
-            xfer += iprot->readListBegin(_etype1080, _size1077);
-            this->success.resize(_size1077);
-            uint32_t _i1081;
-            for (_i1081 = 0; _i1081 < _size1077; ++_i1081)
+            uint32_t _size1089;
+            ::apache::thrift::protocol::TType _etype1092;
+            xfer += iprot->readListBegin(_etype1092, _size1089);
+            this->success.resize(_size1089);
+            uint32_t _i1093;
+            for (_i1093 = 0; _i1093 < _size1089; ++_i1093)
             {
-              xfer += iprot->readString(this->success[_i1081]);
+              xfer += iprot->readString(this->success[_i1093]);
             }
             xfer += iprot->readListEnd();
           }
@@ -15048,10 +15048,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1082;
-      for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082)
+      std::vector<std::string> ::const_iterator _iter1094;
+      for (_iter1094 = this->success.begin(); _iter1094 != this->success.end(); ++_iter1094)
      {
-        xfer += oprot->writeString((*_iter1082));
+        xfer += oprot->writeString((*_iter1094));
      }
      xfer += oprot->writeListEnd();
    }
@@ -15096,14 +15096,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1083;
-            ::apache::thrift::protocol::TType _etype1086;
-            xfer += iprot->readListBegin(_etype1086, _size1083);
-            (*(this->success)).resize(_size1083);
-            uint32_t _i1087;
-            for (_i1087 = 0; _i1087 < _size1083; ++_i1087)
+            uint32_t _size1095;
+            ::apache::thrift::protocol::TType _etype1098;
+            xfer += iprot->readListBegin(_etype1098, _size1095);
+            (*(this->success)).resize(_size1095);
+            uint32_t _i1099;
+            for (_i1099 = 0; _i1099 < _size1095; ++_i1099)
             {
-              xfer += iprot->readString((*(this->success))[_i1087]);
+              xfer += iprot->readString((*(this->success))[_i1099]);
            }
            xfer += iprot->readListEnd();
          }
@@ -15178,14 +15178,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1088;
-            ::apache::thrift::protocol::TType _etype1091;
-            xfer += iprot->readListBegin(_etype1091, _size1088);
-            this->part_vals.resize(_size1088);
-            uint32_t _i1092;
-            for (_i1092 = 0; _i1092 < _size1088; ++_i1092)
+            uint32_t _size1100;
+            ::apache::thrift::protocol::TType _etype1103;
+            xfer += iprot->readListBegin(_etype1103, _size1100);
+            this->part_vals.resize(_size1100);
+            uint32_t _i1104;
+            for (_i1104 = 0; _i1104 < _size1100; ++_i1104)
             {
-              xfer += iprot->readString(this->part_vals[_i1092]);
+              xfer += iprot->readString(this->part_vals[_i1104]);
            }
            xfer += iprot->readListEnd();
          }
@@ -15230,10 +15230,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1093;
-    for (_iter1093 = this->part_vals.begin(); _iter1093 != this->part_vals.end(); ++_iter1093)
+    std::vector<std::string> ::const_iterator _iter1105;
+    for (_iter1105 = this->part_vals.begin(); _iter1105 != this->part_vals.end(); ++_iter1105)
     {
-      xfer += oprot->writeString((*_iter1093));
+      xfer += oprot->writeString((*_iter1105));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15269,10 +15269,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1094;
-    for (_iter1094 = (*(this->part_vals)).begin(); _iter1094 != (*(this->part_vals)).end(); ++_iter1094)
+    std::vector<std::string> ::const_iterator _iter1106;
+    for (_iter1106 = (*(this->part_vals)).begin(); _iter1106 != (*(this->part_vals)).end(); ++_iter1106)
     {
-      xfer += oprot->writeString((*_iter1094));
+      xfer += oprot->writeString((*_iter1106));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15317,14 +15317,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1095;
-            ::apache::thrift::protocol::TType _etype1098;
-            xfer += iprot->readListBegin(_etype1098, _size1095);
-            this->success.resize(_size1095);
-            uint32_t _i1099;
-            for (_i1099 = 0; _i1099 < _size1095; ++_i1099)
+            uint32_t _size1107;
+            ::apache::thrift::protocol::TType _etype1110;
+            xfer += iprot->readListBegin(_etype1110, _size1107);
+            this->success.resize(_size1107);
+            uint32_t _i1111;
+            for (_i1111 = 0; _i1111 < _size1107; ++_i1111)
             {
-              xfer += this->success[_i1099].read(iprot);
+              xfer += this->success[_i1111].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -15371,10 +15371,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1100;
-      for (_iter1100 = this->success.begin(); _iter1100 != this->success.end(); ++_iter1100)
+      std::vector<Partition> ::const_iterator _iter1112;
+      for (_iter1112 = this->success.begin(); _iter1112 != this->success.end(); ++_iter1112)
      {
-        xfer += (*_iter1100).write(oprot);
+        xfer += (*_iter1112).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -15423,14 +15423,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1101;
-            ::apache::thrift::protocol::TType _etype1104;
-            xfer += iprot->readListBegin(_etype1104, _size1101);
-            (*(this->success)).resize(_size1101);
-            uint32_t _i1105;
-            for (_i1105 = 0; _i1105 < _size1101; ++_i1105)
+            uint32_t _size1113;
+            ::apache::thrift::protocol::TType _etype1116;
+            xfer += iprot->readListBegin(_etype1116, _size1113);
+            (*(this->success)).resize(_size1113);
+            uint32_t _i1117;
+            for (_i1117 = 0; _i1117 < _size1113; ++_i1117)
             {
-              xfer += (*(this->success))[_i1105].read(iprot);
+              xfer += (*(this->success))[_i1117].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -15513,14 +15513,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->part_vals.clear();
-            uint32_t _size1106;
-            ::apache::thrift::protocol::TType _etype1109;
-            xfer += iprot->readListBegin(_etype1109, _size1106);
-            this->part_vals.resize(_size1106);
-            uint32_t _i1110;
-            for (_i1110 = 0; _i1110 < _size1106; ++_i1110)
+            uint32_t _size1118;
+            ::apache::thrift::protocol::TType _etype1121;
+            xfer += iprot->readListBegin(_etype1121, _size1118);
+            this->part_vals.resize(_size1118);
+            uint32_t _i1122;
+            for (_i1122 = 0; _i1122 < _size1118; ++_i1122)
             {
-              xfer += iprot->readString(this->part_vals[_i1110]);
+              xfer += iprot->readString(this->part_vals[_i1122]);
            }
            xfer += iprot->readListEnd();
          }
@@ -15549,14 +15549,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->group_names.clear();
-            uint32_t _size1111;
-            ::apache::thrift::protocol::TType _etype1114;
-            xfer += iprot->readListBegin(_etype1114, _size1111);
-            this->group_names.resize(_size1111);
-            uint32_t _i1115;
-            for (_i1115 = 0; _i1115 < _size1111; ++_i1115)
+            uint32_t _size1123;
+            ::apache::thrift::protocol::TType _etype1126;
+            xfer += iprot->readListBegin(_etype1126, _size1123);
+            this->group_names.resize(_size1123);
+            uint32_t _i1127;
+            for (_i1127 = 0; _i1127 < _size1123; ++_i1127)
             {
-              xfer += iprot->readString(this->group_names[_i1115]);
+              xfer += iprot->readString(this->group_names[_i1127]);
            }
            xfer += iprot->readListEnd();
          }
@@ -15593,10 +15593,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1116;
-    for (_iter1116 = this->part_vals.begin(); _iter1116 != this->part_vals.end(); ++_iter1116)
+    std::vector<std::string> ::const_iterator _iter1128;
+    for (_iter1128 = this->part_vals.begin(); _iter1128 != this->part_vals.end(); ++_iter1128)
     {
-      xfer += oprot->writeString((*_iter1116));
+      xfer += oprot->writeString((*_iter1128));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15613,10 +15613,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->group_names.size()));
-    std::vector<std::string> ::const_iterator _iter1117;
-    for (_iter1117 = this->group_names.begin(); _iter1117 != this->group_names.end(); ++_iter1117)
+    std::vector<std::string> ::const_iterator _iter1129;
+    for (_iter1129 = this->group_names.begin(); _iter1129 != this->group_names.end(); ++_iter1129)
     {
-      xfer += oprot->writeString((*_iter1117));
+      xfer += oprot->writeString((*_iter1129));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15648,10 +15648,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
   xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1118;
-    for (_iter1118 = (*(this->part_vals)).begin(); _iter1118 != (*(this->part_vals)).end(); ++_iter1118)
+    std::vector<std::string> ::const_iterator _iter1130;
+    for (_iter1130 = (*(this->part_vals)).begin(); _iter1130 != (*(this->part_vals)).end(); ++_iter1130)
     {
-      xfer += oprot->writeString((*_iter1118));
+      xfer += oprot->writeString((*_iter1130));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15668,10 +15668,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache::
   xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->group_names)).size()));
-    std::vector<std::string> ::const_iterator _iter1119;
-    for (_iter1119 = (*(this->group_names)).begin(); _iter1119 != (*(this->group_names)).end(); ++_iter1119)
+    std::vector<std::string> ::const_iterator _iter1131;
+    for (_iter1131 = (*(this->group_names)).begin(); _iter1131 != (*(this->group_names)).end(); ++_iter1131)
     {
-      xfer += oprot->writeString((*_iter1119));
+      xfer += oprot->writeString((*_iter1131));
     }
     xfer += oprot->writeListEnd();
   }
@@ -15712,14 +15712,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache::
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1120;
-            ::apache::thrift::protocol::TType _etype1123;
-            xfer += iprot->readListBegin(_etype1123, _size1120);
-            this->success.resize(_size1120);
-            uint32_t _i1124;
-            for (_i1124 = 0; _i1124 < _size1120; ++_i1124)
+            uint32_t _size1132;
+            ::apache::thrift::protocol::TType _etype1135;
+            xfer += iprot->readListBegin(_etype1135, _size1132);
+            this->success.resize(_size1132);
+            uint32_t _i1136;
+            for (_i1136 = 0; _i1136 < _size1132; ++_i1136)
            {
-              xfer += this->success[_i1124].read(iprot);
+              xfer += this->success[_i1136].read(iprot);
            }
            xfer += iprot->readListEnd();
          }
@@ -15766,10 +15766,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache:
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
       xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1125;
-      for (_iter1125 = this->success.begin(); _iter1125 != this->success.end(); ++_iter1125)
+      std::vector<Partition> ::const_iterator _iter1137;
+      for (_iter1137 = this->success.begin(); _iter1137 != this->success.end(); ++_iter1137)
      {
-        xfer += (*_iter1125).write(oprot);
+        xfer += (*_iter1137).write(oprot);
      }
      xfer += oprot->writeListEnd();
    }
@@ -15818,243 +15818,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache:
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1126;
-            ::apache::thrift::protocol::TType _etype1129;
-            xfer += iprot->readListBegin(_etype1129, _size1126);
-            (*(this->success)).resize(_size1126);
-            uint32_t _i1130;
-            for (_i1130 = 0; _i1130 < _size1126; ++_i1130)
-            {
-              xfer += (*(this->success))[_i1130].read(iprot);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.success = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-
-ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->db_name);
-          this->__isset.db_name = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->tbl_name);
-          this->__isset.tbl_name = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 3:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->part_vals.clear();
-            uint32_t _size1131;
-            ::apache::thrift::protocol::TType _etype1134;
-            xfer += iprot->readListBegin(_etype1134, _size1131);
-            this->part_vals.resize(_size1131);
-            uint32_t _i1135;
-            for (_i1135 = 0; _i1135 < _size1131; ++_i1135)
-            {
-              xfer += iprot->readString(this->part_vals[_i1135]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.part_vals = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 4:
-        if (ftype == ::apache::thrift::protocol::T_I16) {
-          xfer += iprot->readI16(this->max_parts);
-          this->__isset.max_parts = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args");
-
-  xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString(this->db_name);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString(this->tbl_name);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
-    std::vector<std::string> ::const_iterator _iter1136;
-    for (_iter1136 = this->part_vals.begin(); _iter1136 != this->part_vals.end(); ++_iter1136)
-    {
-      xfer += oprot->writeString((*_iter1136));
-    }
-    xfer += oprot->writeListEnd();
-  }
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4);
-  xfer += oprot->writeI16(this->max_parts);
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
-  uint32_t xfer = 0;
-  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs");
-
-  xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
-  xfer += oprot->writeString((*(this->db_name)));
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2);
-  xfer += oprot->writeString((*(this->tbl_name)));
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
-  {
-    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
-    std::vector<std::string> ::const_iterator _iter1137;
-    for (_iter1137 = (*(this->part_vals)).begin(); _iter1137 != (*(this->part_vals)).end(); ++_iter1137)
-    {
-      xfer += oprot->writeString((*_iter1137));
-    }
-    xfer += oprot->writeListEnd();
-  }
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4);
-  xfer += oprot->writeI16((*(this->max_parts)));
-  xfer += oprot->writeFieldEnd();
-
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 0:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->success.clear();
             uint32_t _size1138;
             ::apache::thrift::protocol::TType _etype1141;
             xfer += iprot->readListBegin(_etype1141, _size1138);
-            this->success.resize(_size1138);
+            (*(this->success)).resize(_size1138);
             uint32_t _i1142;
             for (_i1142 = 0; _i1142 < _size1138; ++_i1142)
             {
-              xfer += iprot->readString(this->success[_i1142]);
+              xfer += (*(this->success))[_i1142].read(iprot);
             }
             xfer += iprot->readListEnd();
           }
@@ -16091,118 +15862,12 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif
 
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
-
-  uint32_t xfer = 0;
-
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result");
-
-  if (this->__isset.success) {
-    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
-    {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
-      std::vector<std::string> ::const_iterator _iter1143;
-      for (_iter1143 = this->success.begin(); _iter1143 != this->success.end(); ++_iter1143)
-      {
-        xfer += oprot->writeString((*_iter1143));
-      }
-      xfer += oprot->writeListEnd();
-    }
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o1) {
-    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
-    xfer += this->o1.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  } else if (this->__isset.o2) {
-    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
-    xfer += this->o2.write(oprot);
-    xfer += oprot->writeFieldEnd();
-  }
-  xfer += oprot->writeFieldStop();
-  xfer += oprot->writeStructEnd();
-  return xfer;
-}
-
-
-ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() {
-}
-
-
-uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
-
-  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
-  uint32_t xfer = 0;
-  std::string fname;
-  ::apache::thrift::protocol::TType ftype;
-  int16_t fid;
-
-  xfer += iprot->readStructBegin(fname);
-
-  using ::apache::thrift::protocol::TProtocolException;
-
-
-  while (true)
-  {
-    xfer += iprot->readFieldBegin(fname, ftype, fid);
-    if (ftype == ::apache::thrift::protocol::T_STOP) {
-      break;
-    }
-    switch (fid)
-    {
-      case 0:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            (*(this->success)).clear();
-            uint32_t _size1144;
-            ::apache::thrift::protocol::TType _etype1147;
-            xfer += iprot->readListBegin(_etype1147, _size1144);
-            (*(this->success)).resize(_size1144);
-            uint32_t _i1148;
-            for (_i1148 = 0; _i1148 < _size1144; ++_i1148)
-            {
-              xfer += iprot->readString((*(this->success))[_i1148]);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.success = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 1:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o1.read(iprot);
-          this->__isset.o1 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      case 2:
-        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
-          xfer += this->o2.read(iprot);
-          this->__isset.o2 = true;
-        } else {
-          xfer += iprot->skip(ftype);
-        }
-        break;
-      default:
-        xfer += iprot->skip(ftype);
-        break;
-    }
-    xfer += iprot->readFieldEnd();
-  }
-  xfer += iprot->readStructEnd();
-
-  return xfer;
-}
-
-
-ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() {
+ThriftHiveMetastore_get_partition_names_ps_args::~ThriftHiveMetastore_get_partition_names_ps_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -16240,9 +15905,21 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif
         }
         break;
       case 3:
-        if (ftype == ::apache::thrift::protocol::T_STRING) {
-          xfer += iprot->readString(this->filter);
-          this->__isset.filter = true;
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->part_vals.clear();
+            uint32_t _size1143;
+            ::apache::thrift::protocol::TType _etype1146;
+            xfer += iprot->readListBegin(_etype1146, _size1143);
+            this->part_vals.resize(_size1143);
+            uint32_t _i1147;
+            for (_i1147 = 0; _i1147 < _size1143; ++_i1147)
+            {
+              xfer += iprot->readString(this->part_vals[_i1147]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.part_vals = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -16267,10 +15944,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrif
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_args");
 
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
@@ -16280,8 +15957,16 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri
   xfer += oprot->writeString(this->tbl_name);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3);
-  xfer += oprot->writeString(this->filter);
+  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->part_vals.size()));
+    std::vector<std::string> ::const_iterator _iter1148;
+    for (_iter1148 = this->part_vals.begin(); _iter1148 != this->part_vals.end(); ++_iter1148)
+    {
+      xfer += oprot->writeString((*_iter1148));
+    }
+    xfer += oprot->writeListEnd();
+  }
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4);
@@ -16294,14 +15979,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thri
 }
 
 
-ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() {
+ThriftHiveMetastore_get_partition_names_ps_pargs::~ThriftHiveMetastore_get_partition_names_ps_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_pargs");
 
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
@@ -16311,8 +15996,16 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr
   xfer += oprot->writeString((*(this->tbl_name)));
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3);
-  xfer += oprot->writeString((*(this->filter)));
+  xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>((*(this->part_vals)).size()));
+    std::vector<std::string> ::const_iterator _iter1149;
+    for (_iter1149 = (*(this->part_vals)).begin(); _iter1149 != (*(this->part_vals)).end(); ++_iter1149)
+    {
+      xfer += oprot->writeString((*_iter1149));
+    }
+    xfer += oprot->writeListEnd();
+  }
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4);
@@ -16325,11 +16018,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thr
 }
 
 
-ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() {
+ThriftHiveMetastore_get_partition_names_ps_result::~ThriftHiveMetastore_get_partition_names_ps_result() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -16354,14 +16047,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             this->success.clear();
-            uint32_t _size1149;
-            ::apache::thrift::protocol::TType _etype1152;
-            xfer += iprot->readListBegin(_etype1152, _size1149);
-            this->success.resize(_size1149);
-            uint32_t _i1153;
-            for (_i1153 = 0; _i1153 < _size1149; ++_i1153)
+            uint32_t _size1150;
+            ::apache::thrift::protocol::TType _etype1153;
+            xfer += iprot->readListBegin(_etype1153, _size1150);
+            this->success.resize(_size1150);
+            uint32_t _i1154;
+            for (_i1154 = 0; _i1154 < _size1150; ++_i1154)
            {
-              xfer += this->success[_i1153].read(iprot);
+              xfer += iprot->readString(this->success[_i1154]);
            }
            xfer += iprot->readListEnd();
          }
@@ -16398,20 +16091,20 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
 
   uint32_t xfer = 0;
 
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_names_ps_result");
 
   if (this->__isset.success) {
     xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
     {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size()));
-      std::vector<Partition> ::const_iterator _iter1154;
-      for (_iter1154 = this->success.begin(); _iter1154 != this->success.end(); ++_iter1154)
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+      std::vector<std::string> ::const_iterator _iter1155;
+      for (_iter1155 = this->success.begin(); _iter1155 != this->success.end(); ++_iter1155)
      {
-        xfer += (*_iter1154).write(oprot);
+        xfer += oprot->writeString((*_iter1155));
      }
      xfer += oprot->writeListEnd();
    }
@@ -16431,11 +16124,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th
 }
 
 
-ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() {
+ThriftHiveMetastore_get_partition_names_ps_presult::~ThriftHiveMetastore_get_partition_names_ps_presult() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -16460,14 +16153,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th
         if (ftype == ::apache::thrift::protocol::T_LIST) {
           {
             (*(this->success)).clear();
-            uint32_t _size1155;
-            ::apache::thrift::protocol::TType _etype1158;
-            xfer += iprot->readListBegin(_etype1158, _size1155);
-            (*(this->success)).resize(_size1155);
-            uint32_t _i1159;
-            for (_i1159 = 0; _i1159 < _size1155; ++_i1159)
+            uint32_t _size1156;
+            ::apache::thrift::protocol::TType _etype1159;
+            xfer += iprot->readListBegin(_etype1159, _size1156);
+            (*(this->success)).resize(_size1156);
+            uint32_t _i1160;
+            for (_i1160 = 0; _i1160 < _size1156; ++_i1160)
            {
-              xfer += (*(this->success))[_i1159].read(iprot);
+              xfer += iprot->readString((*(this->success))[_i1160]);
            }
            xfer += iprot->readListEnd();
          }
@@ -16505,11 +16198,11 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th
 }
 
 
-ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() {
+ThriftHiveMetastore_get_partitions_by_filter_args::~ThriftHiveMetastore_get_partitions_by_filter_args() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -16555,8 +16248,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif
         }
         break;
       case 4:
-        if (ftype == ::apache::thrift::protocol::T_I32) {
-          xfer += iprot->readI32(this->max_parts);
+        if (ftype == ::apache::thrift::protocol::T_I16) {
+          xfer += iprot->readI16(this->max_parts);
           this->__isset.max_parts = true;
         } else {
           xfer += iprot->skip(ftype);
@@ -16574,10 +16267,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrif
   return xfer;
 }
 
-uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_args");
 
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString(this->db_name);
@@ -16591,8 +16284,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri
   xfer += oprot->writeString(this->filter);
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4);
-  xfer += oprot->writeI32(this->max_parts);
+  xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4);
+  xfer += oprot->writeI16(this->max_parts);
   xfer += oprot->writeFieldEnd();
 
   xfer += oprot->writeFieldStop();
@@ -16601,14 +16294,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thri
 }
 
 
-ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() {
+ThriftHiveMetastore_get_partitions_by_filter_pargs::~ThriftHiveMetastore_get_partitions_by_filter_pargs() throw() {
 }
 
 
-uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t ThriftHiveMetastore_get_partitions_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs");
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_pargs");
 
   xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1);
   xfer += oprot->writeString((*(this->db_name)));
@@ -16622,8 +16315,8 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr
   xfer += oprot->writeString((*(this->filter)));
   xfer += oprot->writeFieldEnd();
 
-  xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4);
-  xfer +=
oprot->writeI32((*(this->max_parts))); + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I16, 4); + xfer += oprot->writeI16((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -16632,11 +16325,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thr } -ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { +ThriftHiveMetastore_get_partitions_by_filter_result::~ThriftHiveMetastore_get_partitions_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -16661,14 +16354,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1160; - ::apache::thrift::protocol::TType _etype1163; - xfer += iprot->readListBegin(_etype1163, _size1160); - this->success.resize(_size1160); - uint32_t _i1164; - for (_i1164 = 0; _i1164 < _size1160; ++_i1164) + uint32_t _size1161; + ::apache::thrift::protocol::TType _etype1164; + xfer += iprot->readListBegin(_etype1164, _size1161); + this->success.resize(_size1161); + uint32_t _i1165; + for (_i1165 = 0; _i1165 < _size1161; ++_i1165) { - xfer += this->success[_i1164].read(iprot); + xfer += this->success[_i1165].read(iprot); } xfer += iprot->readListEnd(); } @@ -16705,20 +16398,20 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_filter_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1165; - for (_iter1165 = this->success.begin(); _iter1165 != this->success.end(); ++_iter1165) + std::vector ::const_iterator _iter1166; + for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166) { - xfer += (*_iter1165).write(oprot); + xfer += (*_iter1166).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16738,11 +16431,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th } -ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { +ThriftHiveMetastore_get_partitions_by_filter_presult::~ThriftHiveMetastore_get_partitions_by_filter_presult() throw() { } -uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -16767,14 +16460,14 @@ uint32_t 
ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1166; - ::apache::thrift::protocol::TType _etype1169; - xfer += iprot->readListBegin(_etype1169, _size1166); - (*(this->success)).resize(_size1166); - uint32_t _i1170; - for (_i1170 = 0; _i1170 < _size1166; ++_i1170) + uint32_t _size1167; + ::apache::thrift::protocol::TType _etype1170; + xfer += iprot->readListBegin(_etype1170, _size1167); + (*(this->success)).resize(_size1167); + uint32_t _i1171; + for (_i1171 = 0; _i1171 < _size1167; ++_i1171) { - xfer += (*(this->success))[_i1170].read(iprot); + xfer += (*(this->success))[_i1171].read(iprot); } xfer += iprot->readListEnd(); } @@ -16812,11 +16505,11 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th } -ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_args::~ThriftHiveMetastore_get_part_specs_by_filter_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -16838,9 +16531,33 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->req.read(iprot); - this->__isset.req = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->filter); + this->__isset.filter = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->max_parts); + this->__isset.max_parts = true; } else { xfer += iprot->skip(ftype); } @@ -16857,13 +16574,25 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift: return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_args"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->req.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", 
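The _args::read methods shuffled in these hunks all share one field-driven loop: read a field header, dispatch on the numeric field id, skip anything with an unexpected id or wire type, and stop at T_STOP. Skipping unknown fields is what keeps old and new clients wire-compatible across schema changes like this one. A self-contained sketch of that loop (the function readDbTbl and its out-parameters are illustrative assumptions, not patch code):

#include <cstdint>
#include <string>
#include <thrift/protocol/TProtocol.h>

// Reads only the db_name/tbl_name fields of an args struct; every other
// field (filter, part_vals, max_parts, ...) is skipped by wire type,
// mirroring the generated readers above.
uint32_t readDbTbl(::apache::thrift::protocol::TProtocol* iprot,
                   std::string& dbName, std::string& tblName) {
  uint32_t xfer = 0;
  std::string fname;
  ::apache::thrift::protocol::TType ftype;
  int16_t fid;
  xfer += iprot->readStructBegin(fname);
  while (true) {
    xfer += iprot->readFieldBegin(fname, ftype, fid);
    if (ftype == ::apache::thrift::protocol::T_STOP) {
      break;                        // end of struct on the wire
    }
    switch (fid) {
    case 1:
      if (ftype == ::apache::thrift::protocol::T_STRING) {
        xfer += iprot->readString(dbName);
      } else {
        xfer += iprot->skip(ftype); // wrong type: discard defensively
      }
      break;
    case 2:
      if (ftype == ::apache::thrift::protocol::T_STRING) {
        xfer += iprot->readString(tblName);
      } else {
        xfer += iprot->skip(ftype);
      }
      break;
    default:
      xfer += iprot->skip(ftype);   // unknown field id: forward compatibility
      break;
    }
    xfer += iprot->readFieldEnd();
  }
  xfer += iprot->readStructEnd();
  return xfer;
}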
::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->filter); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32(this->max_parts); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -16872,17 +16601,29 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift } -ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_pargs::~ThriftHiveMetastore_get_part_specs_by_filter_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_pargs"); - xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("filter", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->filter))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("max_parts", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32((*(this->max_parts))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -16891,11 +16632,270 @@ uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrif } -ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { +ThriftHiveMetastore_get_part_specs_by_filter_result::~ThriftHiveMetastore_get_part_specs_by_filter_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1172; + ::apache::thrift::protocol::TType _etype1175; + xfer += iprot->readListBegin(_etype1175, _size1172); + this->success.resize(_size1172); + uint32_t _i1176; + for (_i1176 = 0; _i1176 < _size1172; ++_i1176) + { + xfer += this->success[_i1176].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == 
::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_part_specs_by_filter_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); + std::vector ::const_iterator _iter1177; + for (_iter1177 = this->success.begin(); _iter1177 != this->success.end(); ++_iter1177) + { + xfer += (*_iter1177).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_part_specs_by_filter_presult::~ThriftHiveMetastore_get_part_specs_by_filter_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1178; + ::apache::thrift::protocol::TType _etype1181; + xfer += iprot->readListBegin(_etype1181, _size1178); + (*(this->success)).resize(_size1178); + uint32_t _i1182; + for (_i1182 = 0; _i1182 < _size1178; ++_i1182) + { + xfer += (*(this->success))[_i1182].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_args::~ThriftHiveMetastore_get_partitions_by_expr_args() throw() { +} + + +uint32_t 
ThriftHiveMetastore_get_partitions_by_expr_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_pargs::~ThriftHiveMetastore_get_partitions_by_expr_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_by_expr_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_partitions_by_expr_result::~ThriftHiveMetastore_get_partitions_by_expr_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_partitions_by_expr_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17343,14 +17343,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1171; - ::apache::thrift::protocol::TType _etype1174; - xfer += iprot->readListBegin(_etype1174, _size1171); - this->names.resize(_size1171); - uint32_t _i1175; - for (_i1175 = 0; _i1175 < _size1171; ++_i1175) + uint32_t _size1183; + ::apache::thrift::protocol::TType _etype1186; + xfer += iprot->readListBegin(_etype1186, _size1183); + this->names.resize(_size1183); + uint32_t _i1187; + for (_i1187 = 0; _i1187 < _size1183; ++_i1187) { - xfer += iprot->readString(this->names[_i1175]); + xfer += iprot->readString(this->names[_i1187]); } xfer += iprot->readListEnd(); } @@ -17387,10 +17387,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1176; - for (_iter1176 = this->names.begin(); _iter1176 != 
this->names.end(); ++_iter1176) + std::vector ::const_iterator _iter1188; + for (_iter1188 = this->names.begin(); _iter1188 != this->names.end(); ++_iter1188) { - xfer += oprot->writeString((*_iter1176)); + xfer += oprot->writeString((*_iter1188)); } xfer += oprot->writeListEnd(); } @@ -17422,10 +17422,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1177; - for (_iter1177 = (*(this->names)).begin(); _iter1177 != (*(this->names)).end(); ++_iter1177) + std::vector ::const_iterator _iter1189; + for (_iter1189 = (*(this->names)).begin(); _iter1189 != (*(this->names)).end(); ++_iter1189) { - xfer += oprot->writeString((*_iter1177)); + xfer += oprot->writeString((*_iter1189)); } xfer += oprot->writeListEnd(); } @@ -17466,14 +17466,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1178; - ::apache::thrift::protocol::TType _etype1181; - xfer += iprot->readListBegin(_etype1181, _size1178); - this->success.resize(_size1178); - uint32_t _i1182; - for (_i1182 = 0; _i1182 < _size1178; ++_i1182) + uint32_t _size1190; + ::apache::thrift::protocol::TType _etype1193; + xfer += iprot->readListBegin(_etype1193, _size1190); + this->success.resize(_size1190); + uint32_t _i1194; + for (_i1194 = 0; _i1194 < _size1190; ++_i1194) { - xfer += this->success[_i1182].read(iprot); + xfer += this->success[_i1194].read(iprot); } xfer += iprot->readListEnd(); } @@ -17520,10 +17520,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1183; - for (_iter1183 = this->success.begin(); _iter1183 != this->success.end(); ++_iter1183) + std::vector ::const_iterator _iter1195; + for (_iter1195 = this->success.begin(); _iter1195 != this->success.end(); ++_iter1195) { - xfer += (*_iter1183).write(oprot); + xfer += (*_iter1195).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17572,14 +17572,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1184; - ::apache::thrift::protocol::TType _etype1187; - xfer += iprot->readListBegin(_etype1187, _size1184); - (*(this->success)).resize(_size1184); - uint32_t _i1188; - for (_i1188 = 0; _i1188 < _size1184; ++_i1188) + uint32_t _size1196; + ::apache::thrift::protocol::TType _etype1199; + xfer += iprot->readListBegin(_etype1199, _size1196); + (*(this->success)).resize(_size1196); + uint32_t _i1200; + for (_i1200 = 0; _i1200 < _size1196; ++_i1200) { - xfer += (*(this->success))[_i1188].read(iprot); + xfer += (*(this->success))[_i1200].read(iprot); } xfer += iprot->readListEnd(); } @@ -17901,14 +17901,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1189; - ::apache::thrift::protocol::TType _etype1192; - xfer += iprot->readListBegin(_etype1192, _size1189); - this->new_parts.resize(_size1189); - 
uint32_t _i1193; - for (_i1193 = 0; _i1193 < _size1189; ++_i1193) + uint32_t _size1201; + ::apache::thrift::protocol::TType _etype1204; + xfer += iprot->readListBegin(_etype1204, _size1201); + this->new_parts.resize(_size1201); + uint32_t _i1205; + for (_i1205 = 0; _i1205 < _size1201; ++_i1205) { - xfer += this->new_parts[_i1193].read(iprot); + xfer += this->new_parts[_i1205].read(iprot); } xfer += iprot->readListEnd(); } @@ -17945,10 +17945,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1194; - for (_iter1194 = this->new_parts.begin(); _iter1194 != this->new_parts.end(); ++_iter1194) + std::vector ::const_iterator _iter1206; + for (_iter1206 = this->new_parts.begin(); _iter1206 != this->new_parts.end(); ++_iter1206) { - xfer += (*_iter1194).write(oprot); + xfer += (*_iter1206).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17980,10 +17980,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1195; - for (_iter1195 = (*(this->new_parts)).begin(); _iter1195 != (*(this->new_parts)).end(); ++_iter1195) + std::vector ::const_iterator _iter1207; + for (_iter1207 = (*(this->new_parts)).begin(); _iter1207 != (*(this->new_parts)).end(); ++_iter1207) { - xfer += (*_iter1195).write(oprot); + xfer += (*_iter1207).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18168,14 +18168,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1196; - ::apache::thrift::protocol::TType _etype1199; - xfer += iprot->readListBegin(_etype1199, _size1196); - this->new_parts.resize(_size1196); - uint32_t _i1200; - for (_i1200 = 0; _i1200 < _size1196; ++_i1200) + uint32_t _size1208; + ::apache::thrift::protocol::TType _etype1211; + xfer += iprot->readListBegin(_etype1211, _size1208); + this->new_parts.resize(_size1208); + uint32_t _i1212; + for (_i1212 = 0; _i1212 < _size1208; ++_i1212) { - xfer += this->new_parts[_i1200].read(iprot); + xfer += this->new_parts[_i1212].read(iprot); } xfer += iprot->readListEnd(); } @@ -18220,10 +18220,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1201; - for (_iter1201 = this->new_parts.begin(); _iter1201 != this->new_parts.end(); ++_iter1201) + std::vector ::const_iterator _iter1213; + for (_iter1213 = this->new_parts.begin(); _iter1213 != this->new_parts.end(); ++_iter1213) { - xfer += (*_iter1201).write(oprot); + xfer += (*_iter1213).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18259,10 +18259,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1202; - for (_iter1202 = (*(this->new_parts)).begin(); _iter1202 != (*(this->new_parts)).end(); ++_iter1202) + std::vector ::const_iterator _iter1214; + for (_iter1214 = (*(this->new_parts)).begin(); _iter1214 != (*(this->new_parts)).end(); ++_iter1214) { - xfer += (*_iter1202).write(oprot); + xfer += (*_iter1214).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18706,14 +18706,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1203; - ::apache::thrift::protocol::TType _etype1206; - xfer += iprot->readListBegin(_etype1206, _size1203); - this->part_vals.resize(_size1203); - uint32_t _i1207; - for (_i1207 = 0; _i1207 < _size1203; ++_i1207) + uint32_t _size1215; + ::apache::thrift::protocol::TType _etype1218; + xfer += iprot->readListBegin(_etype1218, _size1215); + this->part_vals.resize(_size1215); + uint32_t _i1219; + for (_i1219 = 0; _i1219 < _size1215; ++_i1219) { - xfer += iprot->readString(this->part_vals[_i1207]); + xfer += iprot->readString(this->part_vals[_i1219]); } xfer += iprot->readListEnd(); } @@ -18758,10 +18758,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1208; - for (_iter1208 = this->part_vals.begin(); _iter1208 != this->part_vals.end(); ++_iter1208) + std::vector ::const_iterator _iter1220; + for (_iter1220 = this->part_vals.begin(); _iter1220 != this->part_vals.end(); ++_iter1220) { - xfer += oprot->writeString((*_iter1208)); + xfer += oprot->writeString((*_iter1220)); } xfer += oprot->writeListEnd(); } @@ -18797,10 +18797,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1209; - for (_iter1209 = (*(this->part_vals)).begin(); _iter1209 != (*(this->part_vals)).end(); ++_iter1209) + std::vector ::const_iterator _iter1221; + for (_iter1221 = (*(this->part_vals)).begin(); _iter1221 != (*(this->part_vals)).end(); ++_iter1221) { - xfer += oprot->writeString((*_iter1209)); + xfer += oprot->writeString((*_iter1221)); } xfer += oprot->writeListEnd(); } @@ -18973,14 +18973,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1210; - ::apache::thrift::protocol::TType _etype1213; - xfer += iprot->readListBegin(_etype1213, _size1210); - this->part_vals.resize(_size1210); - uint32_t _i1214; - for (_i1214 = 0; _i1214 < _size1210; ++_i1214) + uint32_t _size1222; + ::apache::thrift::protocol::TType _etype1225; + xfer += iprot->readListBegin(_etype1225, _size1222); + this->part_vals.resize(_size1222); + uint32_t _i1226; + for (_i1226 = 0; _i1226 < _size1222; ++_i1226) { - xfer += iprot->readString(this->part_vals[_i1214]); + xfer += iprot->readString(this->part_vals[_i1226]); } xfer += iprot->readListEnd(); } @@ -19017,10 +19017,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += 
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1215; - for (_iter1215 = this->part_vals.begin(); _iter1215 != this->part_vals.end(); ++_iter1215) + std::vector ::const_iterator _iter1227; + for (_iter1227 = this->part_vals.begin(); _iter1227 != this->part_vals.end(); ++_iter1227) { - xfer += oprot->writeString((*_iter1215)); + xfer += oprot->writeString((*_iter1227)); } xfer += oprot->writeListEnd(); } @@ -19048,10 +19048,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1216; - for (_iter1216 = (*(this->part_vals)).begin(); _iter1216 != (*(this->part_vals)).end(); ++_iter1216) + std::vector ::const_iterator _iter1228; + for (_iter1228 = (*(this->part_vals)).begin(); _iter1228 != (*(this->part_vals)).end(); ++_iter1228) { - xfer += oprot->writeString((*_iter1216)); + xfer += oprot->writeString((*_iter1228)); } xfer += oprot->writeListEnd(); } @@ -19526,14 +19526,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1217; - ::apache::thrift::protocol::TType _etype1220; - xfer += iprot->readListBegin(_etype1220, _size1217); - this->success.resize(_size1217); - uint32_t _i1221; - for (_i1221 = 0; _i1221 < _size1217; ++_i1221) + uint32_t _size1229; + ::apache::thrift::protocol::TType _etype1232; + xfer += iprot->readListBegin(_etype1232, _size1229); + this->success.resize(_size1229); + uint32_t _i1233; + for (_i1233 = 0; _i1233 < _size1229; ++_i1233) { - xfer += iprot->readString(this->success[_i1221]); + xfer += iprot->readString(this->success[_i1233]); } xfer += iprot->readListEnd(); } @@ -19572,10 +19572,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1222; - for (_iter1222 = this->success.begin(); _iter1222 != this->success.end(); ++_iter1222) + std::vector ::const_iterator _iter1234; + for (_iter1234 = this->success.begin(); _iter1234 != this->success.end(); ++_iter1234) { - xfer += oprot->writeString((*_iter1222)); + xfer += oprot->writeString((*_iter1234)); } xfer += oprot->writeListEnd(); } @@ -19620,14 +19620,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1223; - ::apache::thrift::protocol::TType _etype1226; - xfer += iprot->readListBegin(_etype1226, _size1223); - (*(this->success)).resize(_size1223); - uint32_t _i1227; - for (_i1227 = 0; _i1227 < _size1223; ++_i1227) + uint32_t _size1235; + ::apache::thrift::protocol::TType _etype1238; + xfer += iprot->readListBegin(_etype1238, _size1235); + (*(this->success)).resize(_size1235); + uint32_t _i1239; + for (_i1239 = 0; _i1239 < _size1235; ++_i1239) { - xfer += iprot->readString((*(this->success))[_i1227]); + xfer += iprot->readString((*(this->success))[_i1239]); } xfer += 
iprot->readListEnd(); } @@ -19765,17 +19765,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1228; - ::apache::thrift::protocol::TType _ktype1229; - ::apache::thrift::protocol::TType _vtype1230; - xfer += iprot->readMapBegin(_ktype1229, _vtype1230, _size1228); - uint32_t _i1232; - for (_i1232 = 0; _i1232 < _size1228; ++_i1232) + uint32_t _size1240; + ::apache::thrift::protocol::TType _ktype1241; + ::apache::thrift::protocol::TType _vtype1242; + xfer += iprot->readMapBegin(_ktype1241, _vtype1242, _size1240); + uint32_t _i1244; + for (_i1244 = 0; _i1244 < _size1240; ++_i1244) { - std::string _key1233; - xfer += iprot->readString(_key1233); - std::string& _val1234 = this->success[_key1233]; - xfer += iprot->readString(_val1234); + std::string _key1245; + xfer += iprot->readString(_key1245); + std::string& _val1246 = this->success[_key1245]; + xfer += iprot->readString(_val1246); } xfer += iprot->readMapEnd(); } @@ -19814,11 +19814,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1235; - for (_iter1235 = this->success.begin(); _iter1235 != this->success.end(); ++_iter1235) + std::map ::const_iterator _iter1247; + for (_iter1247 = this->success.begin(); _iter1247 != this->success.end(); ++_iter1247) { - xfer += oprot->writeString(_iter1235->first); - xfer += oprot->writeString(_iter1235->second); + xfer += oprot->writeString(_iter1247->first); + xfer += oprot->writeString(_iter1247->second); } xfer += oprot->writeMapEnd(); } @@ -19863,17 +19863,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1236; - ::apache::thrift::protocol::TType _ktype1237; - ::apache::thrift::protocol::TType _vtype1238; - xfer += iprot->readMapBegin(_ktype1237, _vtype1238, _size1236); - uint32_t _i1240; - for (_i1240 = 0; _i1240 < _size1236; ++_i1240) + uint32_t _size1248; + ::apache::thrift::protocol::TType _ktype1249; + ::apache::thrift::protocol::TType _vtype1250; + xfer += iprot->readMapBegin(_ktype1249, _vtype1250, _size1248); + uint32_t _i1252; + for (_i1252 = 0; _i1252 < _size1248; ++_i1252) { - std::string _key1241; - xfer += iprot->readString(_key1241); - std::string& _val1242 = (*(this->success))[_key1241]; - xfer += iprot->readString(_val1242); + std::string _key1253; + xfer += iprot->readString(_key1253); + std::string& _val1254 = (*(this->success))[_key1253]; + xfer += iprot->readString(_val1254); } xfer += iprot->readMapEnd(); } @@ -19948,17 +19948,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1243; - ::apache::thrift::protocol::TType _ktype1244; - ::apache::thrift::protocol::TType _vtype1245; - xfer += iprot->readMapBegin(_ktype1244, _vtype1245, _size1243); - uint32_t _i1247; - for (_i1247 = 0; _i1247 < _size1243; ++_i1247) + uint32_t _size1255; + ::apache::thrift::protocol::TType _ktype1256; + ::apache::thrift::protocol::TType _vtype1257; + xfer += iprot->readMapBegin(_ktype1256, _vtype1257, _size1255); + uint32_t _i1259; + for 
(_i1259 = 0; _i1259 < _size1255; ++_i1259) { - std::string _key1248; - xfer += iprot->readString(_key1248); - std::string& _val1249 = this->part_vals[_key1248]; - xfer += iprot->readString(_val1249); + std::string _key1260; + xfer += iprot->readString(_key1260); + std::string& _val1261 = this->part_vals[_key1260]; + xfer += iprot->readString(_val1261); } xfer += iprot->readMapEnd(); } @@ -19969,9 +19969,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1250; - xfer += iprot->readI32(ecast1250); - this->eventType = (PartitionEventType::type)ecast1250; + int32_t ecast1262; + xfer += iprot->readI32(ecast1262); + this->eventType = (PartitionEventType::type)ecast1262; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20005,11 +20005,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1251; - for (_iter1251 = this->part_vals.begin(); _iter1251 != this->part_vals.end(); ++_iter1251) + std::map ::const_iterator _iter1263; + for (_iter1263 = this->part_vals.begin(); _iter1263 != this->part_vals.end(); ++_iter1263) { - xfer += oprot->writeString(_iter1251->first); - xfer += oprot->writeString(_iter1251->second); + xfer += oprot->writeString(_iter1263->first); + xfer += oprot->writeString(_iter1263->second); } xfer += oprot->writeMapEnd(); } @@ -20045,11 +20045,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1252; - for (_iter1252 = (*(this->part_vals)).begin(); _iter1252 != (*(this->part_vals)).end(); ++_iter1252) + std::map ::const_iterator _iter1264; + for (_iter1264 = (*(this->part_vals)).begin(); _iter1264 != (*(this->part_vals)).end(); ++_iter1264) { - xfer += oprot->writeString(_iter1252->first); - xfer += oprot->writeString(_iter1252->second); + xfer += oprot->writeString(_iter1264->first); + xfer += oprot->writeString(_iter1264->second); } xfer += oprot->writeMapEnd(); } @@ -20318,17 +20318,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1253; - ::apache::thrift::protocol::TType _ktype1254; - ::apache::thrift::protocol::TType _vtype1255; - xfer += iprot->readMapBegin(_ktype1254, _vtype1255, _size1253); - uint32_t _i1257; - for (_i1257 = 0; _i1257 < _size1253; ++_i1257) + uint32_t _size1265; + ::apache::thrift::protocol::TType _ktype1266; + ::apache::thrift::protocol::TType _vtype1267; + xfer += iprot->readMapBegin(_ktype1266, _vtype1267, _size1265); + uint32_t _i1269; + for (_i1269 = 0; _i1269 < _size1265; ++_i1269) { - std::string _key1258; - xfer += iprot->readString(_key1258); - std::string& _val1259 = this->part_vals[_key1258]; - xfer += iprot->readString(_val1259); + std::string _key1270; + xfer += iprot->readString(_key1270); + std::string& _val1271 = this->part_vals[_key1270]; + xfer += iprot->readString(_val1271); } xfer += 
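The part_vals map hunks here (markPartitionForEvent / isPartitionMarkedForEvent) only renumber the generator's _size/_ktype/_vtype/_key/_val temporaries; the behavior is unchanged. The underlying map<string,string> read pattern, as a stand-alone sketch (readStringMap is an assumed helper name, not patch code):

#include <cstdint>
#include <map>
#include <string>
#include <thrift/protocol/TProtocol.h>

// Deserializes a map<string,string>. Like the generated code, the value is
// read directly into the slot that operator[] creates for the key.
uint32_t readStringMap(::apache::thrift::protocol::TProtocol* iprot,
                       std::map<std::string, std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType ktype, vtype;
  xfer += iprot->readMapBegin(ktype, vtype, size);
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;
    xfer += iprot->readString(key);
    xfer += iprot->readString(out[key]);
  }
  xfer += iprot->readMapEnd();
  return xfer;
}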
iprot->readMapEnd(); } @@ -20339,9 +20339,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1260; - xfer += iprot->readI32(ecast1260); - this->eventType = (PartitionEventType::type)ecast1260; + int32_t ecast1272; + xfer += iprot->readI32(ecast1272); + this->eventType = (PartitionEventType::type)ecast1272; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20375,11 +20375,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1261; - for (_iter1261 = this->part_vals.begin(); _iter1261 != this->part_vals.end(); ++_iter1261) + std::map ::const_iterator _iter1273; + for (_iter1273 = this->part_vals.begin(); _iter1273 != this->part_vals.end(); ++_iter1273) { - xfer += oprot->writeString(_iter1261->first); - xfer += oprot->writeString(_iter1261->second); + xfer += oprot->writeString(_iter1273->first); + xfer += oprot->writeString(_iter1273->second); } xfer += oprot->writeMapEnd(); } @@ -20415,11 +20415,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1262; - for (_iter1262 = (*(this->part_vals)).begin(); _iter1262 != (*(this->part_vals)).end(); ++_iter1262) + std::map ::const_iterator _iter1274; + for (_iter1274 = (*(this->part_vals)).begin(); _iter1274 != (*(this->part_vals)).end(); ++_iter1274) { - xfer += oprot->writeString(_iter1262->first); - xfer += oprot->writeString(_iter1262->second); + xfer += oprot->writeString(_iter1274->first); + xfer += oprot->writeString(_iter1274->second); } xfer += oprot->writeMapEnd(); } @@ -21855,14 +21855,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1263; - ::apache::thrift::protocol::TType _etype1266; - xfer += iprot->readListBegin(_etype1266, _size1263); - this->success.resize(_size1263); - uint32_t _i1267; - for (_i1267 = 0; _i1267 < _size1263; ++_i1267) + uint32_t _size1275; + ::apache::thrift::protocol::TType _etype1278; + xfer += iprot->readListBegin(_etype1278, _size1275); + this->success.resize(_size1275); + uint32_t _i1279; + for (_i1279 = 0; _i1279 < _size1275; ++_i1279) { - xfer += this->success[_i1267].read(iprot); + xfer += this->success[_i1279].read(iprot); } xfer += iprot->readListEnd(); } @@ -21909,10 +21909,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1268; - for (_iter1268 = this->success.begin(); _iter1268 != this->success.end(); ++_iter1268) + std::vector ::const_iterator _iter1280; + for (_iter1280 = this->success.begin(); _iter1280 != this->success.end(); ++_iter1280) { - xfer += (*_iter1268).write(oprot); + xfer += 
(*_iter1280).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21961,14 +21961,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1269; - ::apache::thrift::protocol::TType _etype1272; - xfer += iprot->readListBegin(_etype1272, _size1269); - (*(this->success)).resize(_size1269); - uint32_t _i1273; - for (_i1273 = 0; _i1273 < _size1269; ++_i1273) + uint32_t _size1281; + ::apache::thrift::protocol::TType _etype1284; + xfer += iprot->readListBegin(_etype1284, _size1281); + (*(this->success)).resize(_size1281); + uint32_t _i1285; + for (_i1285 = 0; _i1285 < _size1281; ++_i1285) { - xfer += (*(this->success))[_i1273].read(iprot); + xfer += (*(this->success))[_i1285].read(iprot); } xfer += iprot->readListEnd(); } @@ -22146,14 +22146,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1274; - ::apache::thrift::protocol::TType _etype1277; - xfer += iprot->readListBegin(_etype1277, _size1274); - this->success.resize(_size1274); - uint32_t _i1278; - for (_i1278 = 0; _i1278 < _size1274; ++_i1278) + uint32_t _size1286; + ::apache::thrift::protocol::TType _etype1289; + xfer += iprot->readListBegin(_etype1289, _size1286); + this->success.resize(_size1286); + uint32_t _i1290; + for (_i1290 = 0; _i1290 < _size1286; ++_i1290) { - xfer += iprot->readString(this->success[_i1278]); + xfer += iprot->readString(this->success[_i1290]); } xfer += iprot->readListEnd(); } @@ -22192,10 +22192,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1279; - for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) + std::vector ::const_iterator _iter1291; + for (_iter1291 = this->success.begin(); _iter1291 != this->success.end(); ++_iter1291) { - xfer += oprot->writeString((*_iter1279)); + xfer += oprot->writeString((*_iter1291)); } xfer += oprot->writeListEnd(); } @@ -22240,14 +22240,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - (*(this->success)).resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1292; + ::apache::thrift::protocol::TType _etype1295; + xfer += iprot->readListBegin(_etype1295, _size1292); + (*(this->success)).resize(_size1292); + uint32_t _i1296; + for (_i1296 = 0; _i1296 < _size1292; ++_i1296) { - xfer += iprot->readString((*(this->success))[_i1284]); + xfer += iprot->readString((*(this->success))[_i1296]); } xfer += iprot->readListEnd(); } @@ -26274,14 +26274,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->success.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1297; + 
::apache::thrift::protocol::TType _etype1300; + xfer += iprot->readListBegin(_etype1300, _size1297); + this->success.resize(_size1297); + uint32_t _i1301; + for (_i1301 = 0; _i1301 < _size1297; ++_i1301) { - xfer += iprot->readString(this->success[_i1289]); + xfer += iprot->readString(this->success[_i1301]); } xfer += iprot->readListEnd(); } @@ -26320,10 +26320,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1290; - for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) + std::vector ::const_iterator _iter1302; + for (_iter1302 = this->success.begin(); _iter1302 != this->success.end(); ++_iter1302) { - xfer += oprot->writeString((*_iter1290)); + xfer += oprot->writeString((*_iter1302)); } xfer += oprot->writeListEnd(); } @@ -26368,14 +26368,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1291; - ::apache::thrift::protocol::TType _etype1294; - xfer += iprot->readListBegin(_etype1294, _size1291); - (*(this->success)).resize(_size1291); - uint32_t _i1295; - for (_i1295 = 0; _i1295 < _size1291; ++_i1295) + uint32_t _size1303; + ::apache::thrift::protocol::TType _etype1306; + xfer += iprot->readListBegin(_etype1306, _size1303); + (*(this->success)).resize(_size1303); + uint32_t _i1307; + for (_i1307 = 0; _i1307 < _size1303; ++_i1307) { - xfer += iprot->readString((*(this->success))[_i1295]); + xfer += iprot->readString((*(this->success))[_i1307]); } xfer += iprot->readListEnd(); } @@ -27335,14 +27335,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1296; - ::apache::thrift::protocol::TType _etype1299; - xfer += iprot->readListBegin(_etype1299, _size1296); - this->success.resize(_size1296); - uint32_t _i1300; - for (_i1300 = 0; _i1300 < _size1296; ++_i1300) + uint32_t _size1308; + ::apache::thrift::protocol::TType _etype1311; + xfer += iprot->readListBegin(_etype1311, _size1308); + this->success.resize(_size1308); + uint32_t _i1312; + for (_i1312 = 0; _i1312 < _size1308; ++_i1312) { - xfer += iprot->readString(this->success[_i1300]); + xfer += iprot->readString(this->success[_i1312]); } xfer += iprot->readListEnd(); } @@ -27381,10 +27381,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1301; - for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) + std::vector ::const_iterator _iter1313; + for (_iter1313 = this->success.begin(); _iter1313 != this->success.end(); ++_iter1313) { - xfer += oprot->writeString((*_iter1301)); + xfer += oprot->writeString((*_iter1313)); } xfer += oprot->writeListEnd(); } @@ -27429,14 +27429,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1302; - ::apache::thrift::protocol::TType _etype1305; - 
xfer += iprot->readListBegin(_etype1305, _size1302); - (*(this->success)).resize(_size1302); - uint32_t _i1306; - for (_i1306 = 0; _i1306 < _size1302; ++_i1306) + uint32_t _size1314; + ::apache::thrift::protocol::TType _etype1317; + xfer += iprot->readListBegin(_etype1317, _size1314); + (*(this->success)).resize(_size1314); + uint32_t _i1318; + for (_i1318 = 0; _i1318 < _size1314; ++_i1318) { - xfer += iprot->readString((*(this->success))[_i1306]); + xfer += iprot->readString((*(this->success))[_i1318]); } xfer += iprot->readListEnd(); } @@ -27509,9 +27509,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1307; - xfer += iprot->readI32(ecast1307); - this->principal_type = (PrincipalType::type)ecast1307; + int32_t ecast1319; + xfer += iprot->readI32(ecast1319); + this->principal_type = (PrincipalType::type)ecast1319; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27527,9 +27527,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1308; - xfer += iprot->readI32(ecast1308); - this->grantorType = (PrincipalType::type)ecast1308; + int32_t ecast1320; + xfer += iprot->readI32(ecast1320); + this->grantorType = (PrincipalType::type)ecast1320; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -27800,9 +27800,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1309; - xfer += iprot->readI32(ecast1309); - this->principal_type = (PrincipalType::type)ecast1309; + int32_t ecast1321; + xfer += iprot->readI32(ecast1321); + this->principal_type = (PrincipalType::type)ecast1321; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28033,9 +28033,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1310; - xfer += iprot->readI32(ecast1310); - this->principal_type = (PrincipalType::type)ecast1310; + int32_t ecast1322; + xfer += iprot->readI32(ecast1322); + this->principal_type = (PrincipalType::type)ecast1322; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28124,14 +28124,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1311; - ::apache::thrift::protocol::TType _etype1314; - xfer += iprot->readListBegin(_etype1314, _size1311); - this->success.resize(_size1311); - uint32_t _i1315; - for (_i1315 = 0; _i1315 < _size1311; ++_i1315) + uint32_t _size1323; + ::apache::thrift::protocol::TType _etype1326; + xfer += iprot->readListBegin(_etype1326, _size1323); + this->success.resize(_size1323); + uint32_t _i1327; + for (_i1327 = 0; _i1327 < _size1323; ++_i1327) { - xfer += this->success[_i1315].read(iprot); + xfer += this->success[_i1327].read(iprot); } xfer += iprot->readListEnd(); } @@ -28170,10 +28170,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1316; - for (_iter1316 = 
this->success.begin(); _iter1316 != this->success.end(); ++_iter1316) + std::vector ::const_iterator _iter1328; + for (_iter1328 = this->success.begin(); _iter1328 != this->success.end(); ++_iter1328) { - xfer += (*_iter1316).write(oprot); + xfer += (*_iter1328).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28218,14 +28218,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1317; - ::apache::thrift::protocol::TType _etype1320; - xfer += iprot->readListBegin(_etype1320, _size1317); - (*(this->success)).resize(_size1317); - uint32_t _i1321; - for (_i1321 = 0; _i1321 < _size1317; ++_i1321) + uint32_t _size1329; + ::apache::thrift::protocol::TType _etype1332; + xfer += iprot->readListBegin(_etype1332, _size1329); + (*(this->success)).resize(_size1329); + uint32_t _i1333; + for (_i1333 = 0; _i1333 < _size1329; ++_i1333) { - xfer += (*(this->success))[_i1321].read(iprot); + xfer += (*(this->success))[_i1333].read(iprot); } xfer += iprot->readListEnd(); } @@ -28921,14 +28921,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += iprot->readListBegin(_etype1325, _size1322); - this->group_names.resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1334; + ::apache::thrift::protocol::TType _etype1337; + xfer += iprot->readListBegin(_etype1337, _size1334); + this->group_names.resize(_size1334); + uint32_t _i1338; + for (_i1338 = 0; _i1338 < _size1334; ++_i1338) { - xfer += iprot->readString(this->group_names[_i1326]); + xfer += iprot->readString(this->group_names[_i1338]); } xfer += iprot->readListEnd(); } @@ -28965,10 +28965,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1327; - for (_iter1327 = this->group_names.begin(); _iter1327 != this->group_names.end(); ++_iter1327) + std::vector ::const_iterator _iter1339; + for (_iter1339 = this->group_names.begin(); _iter1339 != this->group_names.end(); ++_iter1339) { - xfer += oprot->writeString((*_iter1327)); + xfer += oprot->writeString((*_iter1339)); } xfer += oprot->writeListEnd(); } @@ -29000,10 +29000,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1328; - for (_iter1328 = (*(this->group_names)).begin(); _iter1328 != (*(this->group_names)).end(); ++_iter1328) + std::vector ::const_iterator _iter1340; + for (_iter1340 = (*(this->group_names)).begin(); _iter1340 != (*(this->group_names)).end(); ++_iter1340) { - xfer += oprot->writeString((*_iter1328)); + xfer += oprot->writeString((*_iter1340)); } xfer += oprot->writeListEnd(); } @@ -29178,9 +29178,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1329; - xfer += 
iprot->readI32(ecast1329); - this->principal_type = (PrincipalType::type)ecast1329; + int32_t ecast1341; + xfer += iprot->readI32(ecast1341); + this->principal_type = (PrincipalType::type)ecast1341; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29285,14 +29285,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1330; - ::apache::thrift::protocol::TType _etype1333; - xfer += iprot->readListBegin(_etype1333, _size1330); - this->success.resize(_size1330); - uint32_t _i1334; - for (_i1334 = 0; _i1334 < _size1330; ++_i1334) + uint32_t _size1342; + ::apache::thrift::protocol::TType _etype1345; + xfer += iprot->readListBegin(_etype1345, _size1342); + this->success.resize(_size1342); + uint32_t _i1346; + for (_i1346 = 0; _i1346 < _size1342; ++_i1346) { - xfer += this->success[_i1334].read(iprot); + xfer += this->success[_i1346].read(iprot); } xfer += iprot->readListEnd(); } @@ -29331,10 +29331,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1335; - for (_iter1335 = this->success.begin(); _iter1335 != this->success.end(); ++_iter1335) + std::vector ::const_iterator _iter1347; + for (_iter1347 = this->success.begin(); _iter1347 != this->success.end(); ++_iter1347) { - xfer += (*_iter1335).write(oprot); + xfer += (*_iter1347).write(oprot); } xfer += oprot->writeListEnd(); } @@ -29379,14 +29379,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1336; - ::apache::thrift::protocol::TType _etype1339; - xfer += iprot->readListBegin(_etype1339, _size1336); - (*(this->success)).resize(_size1336); - uint32_t _i1340; - for (_i1340 = 0; _i1340 < _size1336; ++_i1340) + uint32_t _size1348; + ::apache::thrift::protocol::TType _etype1351; + xfer += iprot->readListBegin(_etype1351, _size1348); + (*(this->success)).resize(_size1348); + uint32_t _i1352; + for (_i1352 = 0; _i1352 < _size1348; ++_i1352) { - xfer += (*(this->success))[_i1340].read(iprot); + xfer += (*(this->success))[_i1352].read(iprot); } xfer += iprot->readListEnd(); } @@ -30074,14 +30074,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1341; - ::apache::thrift::protocol::TType _etype1344; - xfer += iprot->readListBegin(_etype1344, _size1341); - this->group_names.resize(_size1341); - uint32_t _i1345; - for (_i1345 = 0; _i1345 < _size1341; ++_i1345) + uint32_t _size1353; + ::apache::thrift::protocol::TType _etype1356; + xfer += iprot->readListBegin(_etype1356, _size1353); + this->group_names.resize(_size1353); + uint32_t _i1357; + for (_i1357 = 0; _i1357 < _size1353; ++_i1357) { - xfer += iprot->readString(this->group_names[_i1345]); + xfer += iprot->readString(this->group_names[_i1357]); } xfer += iprot->readListEnd(); } @@ -30114,10 +30114,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1346; - for (_iter1346 = this->group_names.begin(); _iter1346 != this->group_names.end(); ++_iter1346) + std::vector ::const_iterator _iter1358; + for (_iter1358 = this->group_names.begin(); _iter1358 != this->group_names.end(); ++_iter1358) { - xfer += oprot->writeString((*_iter1346)); + xfer += oprot->writeString((*_iter1358)); } xfer += oprot->writeListEnd(); } @@ -30145,10 +30145,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1347; - for (_iter1347 = (*(this->group_names)).begin(); _iter1347 != (*(this->group_names)).end(); ++_iter1347) + std::vector ::const_iterator _iter1359; + for (_iter1359 = (*(this->group_names)).begin(); _iter1359 != (*(this->group_names)).end(); ++_iter1359) { - xfer += oprot->writeString((*_iter1347)); + xfer += oprot->writeString((*_iter1359)); } xfer += oprot->writeListEnd(); } @@ -30189,14 +30189,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1348; - ::apache::thrift::protocol::TType _etype1351; - xfer += iprot->readListBegin(_etype1351, _size1348); - this->success.resize(_size1348); - uint32_t _i1352; - for (_i1352 = 0; _i1352 < _size1348; ++_i1352) + uint32_t _size1360; + ::apache::thrift::protocol::TType _etype1363; + xfer += iprot->readListBegin(_etype1363, _size1360); + this->success.resize(_size1360); + uint32_t _i1364; + for (_i1364 = 0; _i1364 < _size1360; ++_i1364) { - xfer += iprot->readString(this->success[_i1352]); + xfer += iprot->readString(this->success[_i1364]); } xfer += iprot->readListEnd(); } @@ -30235,10 +30235,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1353; - for (_iter1353 = this->success.begin(); _iter1353 != this->success.end(); ++_iter1353) + std::vector ::const_iterator _iter1365; + for (_iter1365 = this->success.begin(); _iter1365 != this->success.end(); ++_iter1365) { - xfer += oprot->writeString((*_iter1353)); + xfer += oprot->writeString((*_iter1365)); } xfer += oprot->writeListEnd(); } @@ -30283,14 +30283,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1354; - ::apache::thrift::protocol::TType _etype1357; - xfer += iprot->readListBegin(_etype1357, _size1354); - (*(this->success)).resize(_size1354); - uint32_t _i1358; - for (_i1358 = 0; _i1358 < _size1354; ++_i1358) + uint32_t _size1366; + ::apache::thrift::protocol::TType _etype1369; + xfer += iprot->readListBegin(_etype1369, _size1366); + (*(this->success)).resize(_size1366); + uint32_t _i1370; + for (_i1370 = 0; _i1370 < _size1366; ++_i1370) { - xfer += iprot->readString((*(this->success))[_i1358]); + xfer += iprot->readString((*(this->success))[_i1370]); } xfer += iprot->readListEnd(); } @@ -31601,14 +31601,14 @@ uint32_t 
ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1359; - ::apache::thrift::protocol::TType _etype1362; - xfer += iprot->readListBegin(_etype1362, _size1359); - this->success.resize(_size1359); - uint32_t _i1363; - for (_i1363 = 0; _i1363 < _size1359; ++_i1363) + uint32_t _size1371; + ::apache::thrift::protocol::TType _etype1374; + xfer += iprot->readListBegin(_etype1374, _size1371); + this->success.resize(_size1371); + uint32_t _i1375; + for (_i1375 = 0; _i1375 < _size1371; ++_i1375) { - xfer += iprot->readString(this->success[_i1363]); + xfer += iprot->readString(this->success[_i1375]); } xfer += iprot->readListEnd(); } @@ -31639,10 +31639,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1364; - for (_iter1364 = this->success.begin(); _iter1364 != this->success.end(); ++_iter1364) + std::vector ::const_iterator _iter1376; + for (_iter1376 = this->success.begin(); _iter1376 != this->success.end(); ++_iter1376) { - xfer += oprot->writeString((*_iter1364)); + xfer += oprot->writeString((*_iter1376)); } xfer += oprot->writeListEnd(); } @@ -31683,14 +31683,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1365; - ::apache::thrift::protocol::TType _etype1368; - xfer += iprot->readListBegin(_etype1368, _size1365); - (*(this->success)).resize(_size1365); - uint32_t _i1369; - for (_i1369 = 0; _i1369 < _size1365; ++_i1369) + uint32_t _size1377; + ::apache::thrift::protocol::TType _etype1380; + xfer += iprot->readListBegin(_etype1380, _size1377); + (*(this->success)).resize(_size1377); + uint32_t _i1381; + for (_i1381 = 0; _i1381 < _size1377; ++_i1381) { - xfer += iprot->readString((*(this->success))[_i1369]); + xfer += iprot->readString((*(this->success))[_i1381]); } xfer += iprot->readListEnd(); } @@ -32416,14 +32416,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1370; - ::apache::thrift::protocol::TType _etype1373; - xfer += iprot->readListBegin(_etype1373, _size1370); - this->success.resize(_size1370); - uint32_t _i1374; - for (_i1374 = 0; _i1374 < _size1370; ++_i1374) + uint32_t _size1382; + ::apache::thrift::protocol::TType _etype1385; + xfer += iprot->readListBegin(_etype1385, _size1382); + this->success.resize(_size1382); + uint32_t _i1386; + for (_i1386 = 0; _i1386 < _size1382; ++_i1386) { - xfer += iprot->readString(this->success[_i1374]); + xfer += iprot->readString(this->success[_i1386]); } xfer += iprot->readListEnd(); } @@ -32454,10 +32454,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1375; - for (_iter1375 = this->success.begin(); _iter1375 != this->success.end(); ++_iter1375) + std::vector ::const_iterator _iter1387; + for (_iter1387 = this->success.begin(); _iter1387 != this->success.end(); 
++_iter1387) { - xfer += oprot->writeString((*_iter1375)); + xfer += oprot->writeString((*_iter1387)); } xfer += oprot->writeListEnd(); } @@ -32498,14 +32498,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1376; - ::apache::thrift::protocol::TType _etype1379; - xfer += iprot->readListBegin(_etype1379, _size1376); - (*(this->success)).resize(_size1376); - uint32_t _i1380; - for (_i1380 = 0; _i1380 < _size1376; ++_i1380) + uint32_t _size1388; + ::apache::thrift::protocol::TType _etype1391; + xfer += iprot->readListBegin(_etype1391, _size1388); + (*(this->success)).resize(_size1388); + uint32_t _i1392; + for (_i1392 = 0; _i1392 < _size1388; ++_i1392) { - xfer += iprot->readString((*(this->success))[_i1380]); + xfer += iprot->readString((*(this->success))[_i1392]); } xfer += iprot->readListEnd(); } @@ -37068,6 +37068,567 @@ uint32_t ThriftHiveMetastore_cache_file_metadata_presult::read(::apache::thrift: return xfer; } + +ThriftHiveMetastore_get_next_write_id_args::~ThriftHiveMetastore_get_next_write_id_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_next_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_next_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_next_write_id_pargs::~ThriftHiveMetastore_get_next_write_id_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_next_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_next_write_id_result::~ThriftHiveMetastore_get_next_write_id_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_next_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + 
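+ // Annotation (not generated output): the loop below follows the standard
+ // Thrift deserialization skeleton used by every read() in this file --
+ // readStructBegin, then repeated readFieldBegin calls switching on the
+ // field id, with iprot->skip() for any unrecognized field so old readers
+ // stay forward-compatible, closed out by readFieldEnd/readStructEnd.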
::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_next_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_next_write_id_presult::~ThriftHiveMetastore_get_next_write_id_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_next_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_args::~ThriftHiveMetastore_finalize_write_id_args() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_finalize_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += 
oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_pargs::~ThriftHiveMetastore_finalize_write_id_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_result::~ThriftHiveMetastore_finalize_write_id_result() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_finalize_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_presult::~ThriftHiveMetastore_finalize_write_id_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_args::~ThriftHiveMetastore_heartbeat_write_id_args() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + 
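+ // Annotation (not generated output): the TInputRecursionTracker above is
+ // RAII bookkeeping from the Thrift runtime; to the best of my reading it
+ // bumps the protocol's nesting depth for the lifetime of this read() and
+ // throws once a configured limit is exceeded, so pathologically nested
+ // input cannot overflow the stack.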
uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_pargs::~ThriftHiveMetastore_heartbeat_write_id_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_result::~ThriftHiveMetastore_heartbeat_write_id_result() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_presult::~ThriftHiveMetastore_heartbeat_write_id_presult() throw() { +} + + +uint32_t 
ThriftHiveMetastore_heartbeat_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) { send_getMetaConf(key); @@ -46512,6 +47073,180 @@ void ThriftHiveMetastoreClient::recv_cache_file_metadata(CacheFileMetadataResult throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } +void ThriftHiveMetastoreClient::get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) +{ + send_get_next_write_id(req); + recv_get_next_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_get_next_write_id(const GetNextWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_next_write_id_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_next_write_id(GetNextWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_next_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_next_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_write_id failed: unknown result"); +} + +void ThriftHiveMetastoreClient::finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) +{ + send_finalize_write_id(req); + recv_finalize_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_finalize_write_id(const FinalizeWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_finalize_write_id_pargs args; + args.req = &req; + args.write(oprot_); + 
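+ // Annotation (not generated output): the writeMessageEnd/flush below
+ // completes the send_ half. Each generated synchronous call is split this
+ // way: send_ writes a T_CALL message and flushes the transport, while the
+ // matching recv_ reads the T_REPLY, fills the caller's _return through the
+ // presult pointer, and throws TApplicationException(MISSING_RESULT) when
+ // the server supplied no result.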
+ oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_finalize_write_id(FinalizeWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("finalize_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_finalize_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "finalize_write_id failed: unknown result"); +} + +void ThriftHiveMetastoreClient::heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) +{ + send_heartbeat_write_id(req); + recv_heartbeat_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_heartbeat_write_id(const HeartbeatWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_heartbeat_write_id_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_heartbeat_write_id(HeartbeatWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("heartbeat_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_heartbeat_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_write_id failed: unknown result"); +} + bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { ProcessMap::iterator pfn; pfn = processMap_.find(fname); @@ -55349,6 +56084,168 @@ void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, :: } } +void 
ThriftHiveMetastoreProcessor::process_get_next_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_write_id", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_write_id"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_write_id"); + } + + ThriftHiveMetastore_get_next_write_id_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_write_id", bytes); + } + + ThriftHiveMetastore_get_next_write_id_result result; + try { + iface_->get_next_write_id(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_write_id"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_write_id"); + } + + oprot->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_write_id", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.finalize_write_id", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.finalize_write_id"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.finalize_write_id"); + } + + ThriftHiveMetastore_finalize_write_id_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.finalize_write_id", bytes); + } + + ThriftHiveMetastore_finalize_write_id_result result; + try { + iface_->finalize_write_id(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.finalize_write_id"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + 
this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.finalize_write_id"); + } + + oprot->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.finalize_write_id", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_write_id", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_write_id"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_write_id"); + } + + ThriftHiveMetastore_heartbeat_write_id_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_write_id", bytes); + } + + ThriftHiveMetastore_heartbeat_write_id_result result; + try { + iface_->heartbeat_write_id(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_write_id"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_write_id"); + } + + oprot->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_write_id", bytes); + } +} + ::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); @@ -67528,7 +68425,103 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("check_lock") != 0) { + if (fname.compare("check_lock") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_check_lock_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has 
now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) +{ + int32_t seqid = send_unlock(rqst); + recv_unlock(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_unlock_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("unlock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67537,17 +68530,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_check_lock_presult result; - result.success = &_return; + ThriftHiveMetastore_unlock_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67556,12 +68543,92 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, sentry.commit(); throw result.o2; } - if (result.__isset.o3) { + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +{ + int32_t 
seqid = send_show_locks(rqst); + recv_show_locks(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_show_locks_pargs args; + args.rqst = &rqst; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); sentry.commit(); - throw result.o3; + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("show_locks") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_show_locks_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67571,20 +68638,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) { - int32_t seqid = send_unlock(rqst); - recv_unlock(seqid); + int32_t seqid = send_heartbeat(ids); + recv_heartbeat(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_unlock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_pargs args; + 
args.ids = &ids; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67595,7 +68662,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rq return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) { int32_t rseqid = 0; @@ -67624,7 +68691,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("unlock") != 0) { + if (fname.compare("heartbeat") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67633,7 +68700,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_unlock_presult result; + ThriftHiveMetastore_heartbeat_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67646,6 +68713,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } sentry.commit(); return; } @@ -67657,20 +68728,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) { - int32_t seqid = send_show_locks(rqst); - recv_show_locks(_return, seqid); + int32_t seqid = send_heartbeat_txn_range(txns); + recv_heartbeat_txn_range(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_locks_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_txn_range_pargs args; + args.txns = &txns; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67681,7 +68752,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67710,7 +68781,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_locks") != 0) { + if (fname.compare("heartbeat_txn_range") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67719,7 +68790,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
ThriftHiveMetastore_show_locks_presult result; + ThriftHiveMetastore_heartbeat_txn_range_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67731,7 +68802,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67741,20 +68812,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) +void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) { - int32_t seqid = send_heartbeat(ids); - recv_heartbeat(seqid); + int32_t seqid = send_compact(rqst); + recv_compact(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) +int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_pargs args; - args.ids = &ids; + ThriftHiveMetastore_compact_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67765,7 +68836,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) { int32_t rseqid = 0; @@ -67794,7 +68865,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat") != 0) { + if (fname.compare("compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67803,23 +68874,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_presult result; + ThriftHiveMetastore_compact_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } sentry.commit(); return; } @@ -67831,20 +68890,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) { - int32_t seqid = send_heartbeat_txn_range(txns); - recv_heartbeat_txn_range(_return, seqid); + int32_t seqid = 
send_show_compact(rqst); + recv_show_compact(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_txn_range_pargs args; - args.txns = &txns; + ThriftHiveMetastore_show_compact_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67855,7 +68914,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const Hear return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67884,7 +68943,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat_txn_range") != 0) { + if (fname.compare("show_compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67893,7 +68952,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_txn_range_presult result; + ThriftHiveMetastore_show_compact_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67905,7 +68964,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67915,19 +68974,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) { - int32_t seqid = send_compact(rqst); - recv_compact(seqid); + int32_t seqid = send_add_dynamic_partitions(rqst); + recv_add_dynamic_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact_pargs args; + ThriftHiveMetastore_add_dynamic_partitions_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -67939,7 
+68998,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -67968,7 +69027,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact") != 0) { + if (fname.compare("add_dynamic_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67977,11 +69036,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_compact_presult result; + ThriftHiveMetastore_add_dynamic_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } sentry.commit(); return; } @@ -67993,19 +69060,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) { - int32_t seqid = send_show_compact(rqst); - recv_show_compact(_return, seqid); + int32_t seqid = send_get_next_notification(rqst); + recv_get_next_notification(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_compact_pargs args; + ThriftHiveMetastore_get_next_notification_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -68017,7 +69084,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompact return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68046,7 +69113,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_compact") != 0) { + if (fname.compare("get_next_notification") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68055,7 +69122,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_compact_presult result; + ThriftHiveMetastore_get_next_notification_presult result; 
result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68067,7 +69134,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68077,20 +69144,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) +void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) { - int32_t seqid = send_add_dynamic_partitions(rqst); - recv_add_dynamic_partitions(seqid); + int32_t seqid = send_get_current_notificationEventId(); + recv_get_current_notificationEventId(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_dynamic_partitions_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_current_notificationEventId_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68101,7 +69167,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const A return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68130,7 +69196,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_dynamic_partitions") != 0) { + if (fname.compare("get_current_notificationEventId") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68139,21 +69205,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_dynamic_partitions_presult result; + ThriftHiveMetastore_get_current_notificationEventId_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o2; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); } // seqid != rseqid 
this->sync_.updatePending(fname, mtype, rseqid); @@ -68163,19 +69227,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) { - int32_t seqid = send_get_next_notification(rqst); - recv_get_next_notification(_return, seqid); + int32_t seqid = send_fire_listener_event(rqst); + recv_fire_listener_event(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_next_notification_pargs args; + ThriftHiveMetastore_fire_listener_event_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -68187,7 +69251,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const No return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68216,7 +69280,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_next_notification") != 0) { + if (fname.compare("fire_listener_event") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68225,7 +69289,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_next_notification_presult result; + ThriftHiveMetastore_fire_listener_event_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68237,7 +69301,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68247,19 +69311,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) +void ThriftHiveMetastoreConcurrentClient::flushCache() { - int32_t seqid = send_get_current_notificationEventId(); - recv_get_current_notificationEventId(_return, seqid); + int32_t seqid = send_flushCache(); + recv_flushCache(seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() +int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_current_notificationEventId_pargs args; + ThriftHiveMetastore_flushCache_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68270,7 +69334,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventI return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) { int32_t rseqid = 0; @@ -68299,7 +69363,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_current_notificationEventId") != 0) { + if (fname.compare("flushCache") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68308,7 +69372,85 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_current_notificationEventId_presult result; + ThriftHiveMetastore_flushCache_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +{ + int32_t seqid = send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + 
::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_file_metadata_by_expr") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68320,7 +69462,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68330,20 +69472,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { - int32_t seqid = send_fire_listener_event(rqst); - recv_fire_listener_event(_return, seqid); + int32_t seqid = send_get_file_metadata(req); + recv_get_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_fire_listener_event_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68354,7 +69496,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const Fire return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68383,7 +69525,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("fire_listener_event") != 0) { + if (fname.compare("get_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68392,7 +69534,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_fire_listener_event_presult result; + ThriftHiveMetastore_get_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68404,7 +69546,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68414,19 +69556,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::flushCache() +void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { - int32_t seqid = send_flushCache(); - recv_flushCache(seqid); + int32_t seqid = send_put_file_metadata(req); + recv_put_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() +int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_flushCache_pargs args; + ThriftHiveMetastore_put_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68437,7 +69580,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68466,7 +69609,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("flushCache") != 0) { + if (fname.compare("put_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68475,13 +69618,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_flushCache_presult result; + ThriftHiveMetastore_put_file_metadata_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - sentry.commit(); - return; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68491,19 +69640,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { - int32_t seqid = send_get_file_metadata_by_expr(req); - recv_get_file_metadata_by_expr(_return, seqid); + int32_t seqid = send_clear_file_metadata(req); + recv_clear_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + ThriftHiveMetastore_clear_file_metadata_pargs args; args.req = &req; args.write(oprot_); @@ -68515,7 +69664,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68544,7 +69693,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata_by_expr") != 0) { + if (fname.compare("clear_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68553,7 +69702,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_by_expr_presult result; + ThriftHiveMetastore_clear_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68565,7 +69714,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68575,19 +69724,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) { - int32_t seqid = send_get_file_metadata(req); - recv_get_file_metadata(_return, seqid); + int32_t seqid = send_cache_file_metadata(req); + recv_cache_file_metadata(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_pargs args; + ThriftHiveMetastore_cache_file_metadata_pargs args; args.req = &req; args.write(oprot_); @@ -68599,7 +69748,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68628,7 +69777,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata") != 0) { + if (fname.compare("cache_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68637,7 +69786,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_presult result; + ThriftHiveMetastore_cache_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68649,7 +69798,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68659,19 +69808,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { - int32_t seqid = send_put_file_metadata(req); - recv_put_file_metadata(_return, seqid); + int32_t seqid = send_get_next_write_id(req); + recv_get_next_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_write_id(const GetNextWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_put_file_metadata_pargs args; + ThriftHiveMetastore_get_next_write_id_pargs args; args.req = &req; 
args.write(oprot_); @@ -68683,7 +69832,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_write_id(GetNextWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68712,7 +69861,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("put_file_metadata") != 0) { + if (fname.compare("get_next_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68721,7 +69870,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_put_file_metadata_presult result; + ThriftHiveMetastore_get_next_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68733,7 +69882,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68743,19 +69892,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { - int32_t seqid = send_clear_file_metadata(req); - recv_clear_file_metadata(_return, seqid); + int32_t seqid = send_finalize_write_id(req); + recv_finalize_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_finalize_write_id(const FinalizeWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_clear_file_metadata_pargs args; + ThriftHiveMetastore_finalize_write_id_pargs args; args.req = &req; args.write(oprot_); @@ -68767,7 +69916,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const Clea return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_finalize_write_id(FinalizeWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68796,7 +69945,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("clear_file_metadata") != 0) { + if 
(fname.compare("finalize_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68805,7 +69954,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_clear_file_metadata_presult result; + ThriftHiveMetastore_finalize_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68817,7 +69966,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "finalize_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68827,19 +69976,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { - int32_t seqid = send_cache_file_metadata(req); - recv_cache_file_metadata(_return, seqid); + int32_t seqid = send_heartbeat_write_id(req); + recv_heartbeat_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_write_id(const HeartbeatWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cache_file_metadata_pargs args; + ThriftHiveMetastore_heartbeat_write_id_pargs args; args.req = &req; args.write(oprot_); @@ -68851,7 +70000,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const Cach return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68880,7 +70029,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cache_file_metadata") != 0) { + if (fname.compare("heartbeat_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68889,7 +70038,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cache_file_metadata_presult result; + ThriftHiveMetastore_heartbeat_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68901,7 +70050,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 6498eb1e17c2..df555ec81021 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -171,6 +171,9 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) = 0; virtual void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0; virtual void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) = 0; + virtual void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) = 0; + virtual void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) = 0; + virtual void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -675,6 +678,15 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void cache_file_metadata(CacheFileMetadataResult& /* _return */, const CacheFileMetadataRequest& /* req */) { return; } + void get_next_write_id(GetNextWriteIdResult& /* _return */, const GetNextWriteIdRequest& /* req */) { + return; + } + void finalize_write_id(FinalizeWriteIdResult& /* _return */, const FinalizeWriteIdRequest& /* req */) { + return; + } + void heartbeat_write_id(HeartbeatWriteIdResult& /* _return */, const HeartbeatWriteIdRequest& /* req */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -19131,6 +19143,318 @@ class ThriftHiveMetastore_cache_file_metadata_presult { }; +typedef struct _ThriftHiveMetastore_get_next_write_id_args__isset { + _ThriftHiveMetastore_get_next_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_next_write_id_args__isset; + +class ThriftHiveMetastore_get_next_write_id_args { + public: + + ThriftHiveMetastore_get_next_write_id_args(const ThriftHiveMetastore_get_next_write_id_args&); + ThriftHiveMetastore_get_next_write_id_args& operator=(const ThriftHiveMetastore_get_next_write_id_args&); + ThriftHiveMetastore_get_next_write_id_args() { + } + + virtual ~ThriftHiveMetastore_get_next_write_id_args() throw(); + GetNextWriteIdRequest req; + + _ThriftHiveMetastore_get_next_write_id_args__isset __isset; + + void __set_req(const GetNextWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_get_next_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_next_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_next_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t 
write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_next_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_next_write_id_pargs() throw(); + const GetNextWriteIdRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_next_write_id_result__isset { + _ThriftHiveMetastore_get_next_write_id_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_next_write_id_result__isset; + +class ThriftHiveMetastore_get_next_write_id_result { + public: + + ThriftHiveMetastore_get_next_write_id_result(const ThriftHiveMetastore_get_next_write_id_result&); + ThriftHiveMetastore_get_next_write_id_result& operator=(const ThriftHiveMetastore_get_next_write_id_result&); + ThriftHiveMetastore_get_next_write_id_result() { + } + + virtual ~ThriftHiveMetastore_get_next_write_id_result() throw(); + GetNextWriteIdResult success; + + _ThriftHiveMetastore_get_next_write_id_result__isset __isset; + + void __set_success(const GetNextWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_get_next_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_next_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_next_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_next_write_id_presult__isset { + _ThriftHiveMetastore_get_next_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_next_write_id_presult__isset; + +class ThriftHiveMetastore_get_next_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_get_next_write_id_presult() throw(); + GetNextWriteIdResult* success; + + _ThriftHiveMetastore_get_next_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_args__isset { + _ThriftHiveMetastore_finalize_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_finalize_write_id_args__isset; + +class ThriftHiveMetastore_finalize_write_id_args { + public: + + ThriftHiveMetastore_finalize_write_id_args(const ThriftHiveMetastore_finalize_write_id_args&); + ThriftHiveMetastore_finalize_write_id_args& operator=(const ThriftHiveMetastore_finalize_write_id_args&); + ThriftHiveMetastore_finalize_write_id_args() { + } + + virtual ~ThriftHiveMetastore_finalize_write_id_args() throw(); + FinalizeWriteIdRequest req; + + _ThriftHiveMetastore_finalize_write_id_args__isset __isset; + + void __set_req(const FinalizeWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_finalize_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_finalize_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_finalize_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_finalize_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_finalize_write_id_pargs() throw(); + const FinalizeWriteIdRequest* req; + + 
uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_result__isset { + _ThriftHiveMetastore_finalize_write_id_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_finalize_write_id_result__isset; + +class ThriftHiveMetastore_finalize_write_id_result { + public: + + ThriftHiveMetastore_finalize_write_id_result(const ThriftHiveMetastore_finalize_write_id_result&); + ThriftHiveMetastore_finalize_write_id_result& operator=(const ThriftHiveMetastore_finalize_write_id_result&); + ThriftHiveMetastore_finalize_write_id_result() { + } + + virtual ~ThriftHiveMetastore_finalize_write_id_result() throw(); + FinalizeWriteIdResult success; + + _ThriftHiveMetastore_finalize_write_id_result__isset __isset; + + void __set_success(const FinalizeWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_finalize_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_finalize_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_finalize_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_presult__isset { + _ThriftHiveMetastore_finalize_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_finalize_write_id_presult__isset; + +class ThriftHiveMetastore_finalize_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_finalize_write_id_presult() throw(); + FinalizeWriteIdResult* success; + + _ThriftHiveMetastore_finalize_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_args__isset { + _ThriftHiveMetastore_heartbeat_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_heartbeat_write_id_args__isset; + +class ThriftHiveMetastore_heartbeat_write_id_args { + public: + + ThriftHiveMetastore_heartbeat_write_id_args(const ThriftHiveMetastore_heartbeat_write_id_args&); + ThriftHiveMetastore_heartbeat_write_id_args& operator=(const ThriftHiveMetastore_heartbeat_write_id_args&); + ThriftHiveMetastore_heartbeat_write_id_args() { + } + + virtual ~ThriftHiveMetastore_heartbeat_write_id_args() throw(); + HeartbeatWriteIdRequest req; + + _ThriftHiveMetastore_heartbeat_write_id_args__isset __isset; + + void __set_req(const HeartbeatWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_heartbeat_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_heartbeat_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_heartbeat_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_heartbeat_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_heartbeat_write_id_pargs() throw(); + const HeartbeatWriteIdRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_result__isset { + _ThriftHiveMetastore_heartbeat_write_id_result__isset() : success(false) 
{} + bool success :1; +} _ThriftHiveMetastore_heartbeat_write_id_result__isset; + +class ThriftHiveMetastore_heartbeat_write_id_result { + public: + + ThriftHiveMetastore_heartbeat_write_id_result(const ThriftHiveMetastore_heartbeat_write_id_result&); + ThriftHiveMetastore_heartbeat_write_id_result& operator=(const ThriftHiveMetastore_heartbeat_write_id_result&); + ThriftHiveMetastore_heartbeat_write_id_result() { + } + + virtual ~ThriftHiveMetastore_heartbeat_write_id_result() throw(); + HeartbeatWriteIdResult success; + + _ThriftHiveMetastore_heartbeat_write_id_result__isset __isset; + + void __set_success(const HeartbeatWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_heartbeat_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_heartbeat_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_heartbeat_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_presult__isset { + _ThriftHiveMetastore_heartbeat_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_heartbeat_write_id_presult__isset; + +class ThriftHiveMetastore_heartbeat_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_heartbeat_write_id_presult() throw(); + HeartbeatWriteIdResult* success; + + _ThriftHiveMetastore_heartbeat_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { public: ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : @@ -19589,6 +19913,15 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req); void send_cache_file_metadata(const CacheFileMetadataRequest& req); void recv_cache_file_metadata(CacheFileMetadataResult& _return); + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req); + void send_get_next_write_id(const GetNextWriteIdRequest& req); + void recv_get_next_write_id(GetNextWriteIdResult& _return); + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req); + void send_finalize_write_id(const FinalizeWriteIdRequest& req); + void recv_finalize_write_id(FinalizeWriteIdResult& _return); + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); + void send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); + void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return); }; class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor { @@ -19748,6 +20081,9 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_next_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); public: ThriftHiveMetastoreProcessor(boost::shared_ptr<ThriftHiveMetastoreIf> iface) : ::facebook::fb303::FacebookServiceProcessor(iface), @@ -19901,6 +20237,9 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["put_file_metadata"] = &ThriftHiveMetastoreProcessor::process_put_file_metadata; processMap_["clear_file_metadata"] = &ThriftHiveMetastoreProcessor::process_clear_file_metadata; processMap_["cache_file_metadata"] = &ThriftHiveMetastoreProcessor::process_cache_file_metadata; + processMap_["get_next_write_id"] = &ThriftHiveMetastoreProcessor::process_get_next_write_id; + processMap_["finalize_write_id"] = &ThriftHiveMetastoreProcessor::process_finalize_write_id; + processMap_["heartbeat_write_id"] = &ThriftHiveMetastoreProcessor::process_heartbeat_write_id; } virtual ~ThriftHiveMetastoreProcessor() {} @@ -21361,6 +21700,36 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_next_write_id(_return, req); + } + ifaces_[i]->get_next_write_id(_return, req); + return; + } + + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->finalize_write_id(_return, req); + } + ifaces_[i]->finalize_write_id(_return, req); + return; + } + + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->heartbeat_write_id(_return, req); + } + ifaces_[i]->heartbeat_write_id(_return, req); + return; + } + }; // The 'concurrent' client is a thread safe client that correctly handles @@ -21824,6 +22193,15 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req); int32_t send_cache_file_metadata(const CacheFileMetadataRequest& req); void recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid); + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req); + int32_t send_get_next_write_id(const GetNextWriteIdRequest& req); + void recv_get_next_write_id(GetNextWriteIdResult& _return, const int32_t seqid); + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req); + int32_t send_finalize_write_id(const FinalizeWriteIdRequest& req); + void recv_finalize_write_id(FinalizeWriteIdResult& _return, const int32_t seqid); + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); + int32_t send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); + void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid); }; #ifdef _WIN32
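For reference, a minimal sketch of how a C++ caller might drive the new write-id RPC through the generated client. This is illustrative only and not part of the patch: the host/port, database, and table names are assumptions, the Apache::Hadoop::Hive namespace follows the generator defaults for this module, and the fields of GetNextWriteIdResult are not shown in this diff, so the sketch only prints the struct via the generated printTo().

#include <iostream>
#include <boost/shared_ptr.hpp>
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include "ThriftHiveMetastore.h"

using apache::thrift::protocol::TBinaryProtocol;
using apache::thrift::protocol::TProtocol;
using apache::thrift::transport::TBufferedTransport;
using apache::thrift::transport::TSocket;
using apache::thrift::transport::TTransport;
using namespace Apache::Hadoop::Hive; // namespace is an assumption

int main() {
  // Standard Thrift client stack; localhost:9083 is a hypothetical metastore.
  boost::shared_ptr<TTransport> socket(new TSocket("localhost", 9083));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
  ThriftHiveMetastoreClient client(protocol);

  transport->open();

  // Reserve the next write id for an MM table (names are hypothetical).
  GetNextWriteIdRequest req;
  req.__set_dbName("default");
  req.__set_tblName("mm_table");
  GetNextWriteIdResult res;
  client.get_next_write_id(res, req);
  res.printTo(std::cout); // generated structs expose printTo()

  transport->close();
  return 0;
}

The ThriftHiveMetastoreConcurrentClient variant exposes the same calls but splits each into a send_*/recv_* pair keyed by a sequence id, so multiple threads can safely share one protocol, as the seqid bookkeeping in the hunks above shows.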
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 3330b2fb0a4c..317598e80ea9 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -767,6 +767,21 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("cache_file_metadata\n"); } + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { + // Your implementation goes here + printf("get_next_write_id\n"); + } + + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { + // Your implementation goes here + printf("finalize_write_id\n"); + } + + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { + // Your implementation goes here + printf("heartbeat_write_id\n"); + } + }; int main(int argc, char **argv) { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp index 1cbd176597b4..ccc61cb42523 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp @@ -59,6 +59,8 @@ hive_metastoreConstants::hive_metastoreConstants() { TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"; + TABLE_IS_MM = "hivecommit"; + } }}} // namespace diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h index 3d068c3ec9e9..92a211647bc3 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h @@ -39,6 +39,7 @@ class hive_metastoreConstants { std::string TABLE_IS_TRANSACTIONAL; std::string TABLE_NO_AUTO_COMPACT; std::string TABLE_TRANSACTIONAL_PROPERTIES; + std::string TABLE_IS_MM; }; extern const hive_metastoreConstants g_hive_metastore_constants; diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 174b539b5ec1..bdfa35bcd58b 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -4471,6 +4471,16 @@ void Table::__set_temporary(const bool val) { __isset.temporary = true; } +void Table::__set_mmNextWriteId(const int64_t val) { + this->mmNextWriteId = val; +__isset.mmNextWriteId = true; +} + +void Table::__set_mmWatermarkWriteId(const int64_t val) { + this->mmWatermarkWriteId = val; +__isset.mmWatermarkWriteId = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -4631,6 +4641,22 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->mmNextWriteId); + this->__isset.mmNextWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 16: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->mmWatermarkWriteId); + this->__isset.mmWatermarkWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -4723,6 +4749,16 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* 
oprot) const { xfer += oprot->writeBool(this->temporary); xfer += oprot->writeFieldEnd(); } + if (this->__isset.mmNextWriteId) { + xfer += oprot->writeFieldBegin("mmNextWriteId", ::apache::thrift::protocol::T_I64, 15); + xfer += oprot->writeI64(this->mmNextWriteId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.mmWatermarkWriteId) { + xfer += oprot->writeFieldBegin("mmWatermarkWriteId", ::apache::thrift::protocol::T_I64, 16); + xfer += oprot->writeI64(this->mmWatermarkWriteId); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -4744,6 +4780,8 @@ void swap(Table &a, Table &b) { swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); swap(a.temporary, b.temporary); + swap(a.mmNextWriteId, b.mmNextWriteId); + swap(a.mmWatermarkWriteId, b.mmWatermarkWriteId); swap(a.__isset, b.__isset); } @@ -4762,6 +4800,8 @@ Table::Table(const Table& other221) { tableType = other221.tableType; privileges = other221.privileges; temporary = other221.temporary; + mmNextWriteId = other221.mmNextWriteId; + mmWatermarkWriteId = other221.mmWatermarkWriteId; __isset = other221.__isset; } Table& Table::operator=(const Table& other222) { @@ -4779,6 +4819,8 @@ Table& Table::operator=(const Table& other222) { tableType = other222.tableType; privileges = other222.privileges; temporary = other222.temporary; + mmNextWriteId = other222.mmNextWriteId; + mmWatermarkWriteId = other222.mmWatermarkWriteId; __isset = other222.__isset; return *this; } @@ -4799,6 +4841,8 @@ void Table::printTo(std::ostream& out) const { out << ", " << "tableType=" << to_string(tableType); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); + out << ", " << "mmNextWriteId="; (__isset.mmNextWriteId ? (out << to_string(mmNextWriteId)) : (out << "")); + out << ", " << "mmWatermarkWriteId="; (__isset.mmWatermarkWriteId ? 
(out << to_string(mmWatermarkWriteId)) : (out << ""));
   out << ")";
 }
 
@@ -17610,16 +17654,19 @@ void CacheFileMetadataRequest::printTo(std::ostream& out) const {
 }
 
 
-GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
+GetNextWriteIdRequest::~GetNextWriteIdRequest() throw() {
 }
 
 
-void GetAllFunctionsResponse::__set_functions(const std::vector<Function> & val) {
-  this->functions = val;
-__isset.functions = true;
+void GetNextWriteIdRequest::__set_dbName(const std::string& val) {
+  this->dbName = val;
 }
 
-uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
+void GetNextWriteIdRequest::__set_tblName(const std::string& val) {
+  this->tblName = val;
+}
+
+uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) {
 
   apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
   uint32_t xfer = 0;
@@ -17631,6 +17678,8 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip
 
   using ::apache::thrift::protocol::TProtocolException;
 
+  bool isset_dbName = false;
+  bool isset_tblName = false;
 
   while (true)
   {
@@ -17641,21 +17690,17 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip
     switch (fid)
     {
       case 1:
-        if (ftype == ::apache::thrift::protocol::T_LIST) {
-          {
-            this->functions.clear();
-            uint32_t _size739;
-            ::apache::thrift::protocol::TType _etype742;
-            xfer += iprot->readListBegin(_etype742, _size739);
-            this->functions.resize(_size739);
-            uint32_t _i743;
-            for (_i743 = 0; _i743 < _size739; ++_i743)
-            {
-              xfer += this->functions[_i743].read(iprot);
-            }
-            xfer += iprot->readListEnd();
-          }
-          this->__isset.functions = true;
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dbName);
+          isset_dbName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->tblName);
+          isset_tblName = true;
         } else {
           xfer += iprot->skip(ftype);
         }
@@ -17669,77 +17714,162 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip
 
   xfer += iprot->readStructEnd();
 
+  if (!isset_dbName)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
+  if (!isset_tblName)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
   return xfer;
 }
 
-uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
+uint32_t GetNextWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const {
   uint32_t xfer = 0;
   apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
-  xfer += oprot->writeStructBegin("GetAllFunctionsResponse");
+  xfer += oprot->writeStructBegin("GetNextWriteIdRequest");
 
-  if (this->__isset.functions) {
-    xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
+  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->dbName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->tblName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b) {
+  using ::std::swap;
+  swap(a.dbName, b.dbName);
+  swap(a.tblName, b.tblName);
+}
+
+GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other739) {
+  dbName = other739.dbName;
+  tblName = other739.tblName;
+}
+GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other740) {
+  dbName = other740.dbName;
+  tblName = other740.tblName;
+  return *this;
+}
+void GetNextWriteIdRequest::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "GetNextWriteIdRequest(";
+  out << "dbName=" << to_string(dbName);
+  out << ", " << "tblName=" << to_string(tblName);
+  out << ")";
+}
+
+
+GetNextWriteIdResult::~GetNextWriteIdResult() throw() {
+}
+
+
+void GetNextWriteIdResult::__set_writeId(const int64_t val) {
+  this->writeId = val;
+}
+
+uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+  bool isset_writeId = false;
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
     {
-      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
-      std::vector<Function> ::const_iterator _iter744;
-      for (_iter744 = this->functions.begin(); _iter744 != this->functions.end(); ++_iter744)
-      {
-        xfer += (*_iter744).write(oprot);
-      }
-      xfer += oprot->writeListEnd();
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_I64) {
+          xfer += iprot->readI64(this->writeId);
+          isset_writeId = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
     }
-    xfer += oprot->writeFieldEnd();
+    xfer += iprot->readFieldEnd();
   }
+
+  xfer += iprot->readStructEnd();
+
+  if (!isset_writeId)
+    throw TProtocolException(TProtocolException::INVALID_DATA);
+  return xfer;
+}
+
+uint32_t GetNextWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("GetNextWriteIdResult");
+
+  xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 1);
+  xfer += oprot->writeI64(this->writeId);
+  xfer += oprot->writeFieldEnd();
+
   xfer += oprot->writeFieldStop();
   xfer += oprot->writeStructEnd();
   return xfer;
 }
 
-void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
+void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b) {
   using ::std::swap;
-  swap(a.functions, b.functions);
-  swap(a.__isset, b.__isset);
+  swap(a.writeId, b.writeId);
 }
 
-GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other745) {
-  functions = other745.functions;
-  __isset = other745.__isset;
+GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other741) {
  writeId = other741.writeId;
 }
-GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other746) {
-  functions = other746.functions;
-  __isset = other746.__isset;
+GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other742) {
+  writeId = other742.writeId;
   return *this;
 }
-void GetAllFunctionsResponse::printTo(std::ostream& out) const {
+void GetNextWriteIdResult::printTo(std::ostream& out) const {
   using ::apache::thrift::to_string;
-  out << "GetAllFunctionsResponse(";
-  out << "functions="; (__isset.functions ?
(out << to_string(functions)) : (out << "")); + out << "GetNextWriteIdResult("; + out << "writeId=" << to_string(writeId); out << ")"; } -TableMeta::~TableMeta() throw() { +FinalizeWriteIdRequest::~FinalizeWriteIdRequest() throw() { } -void TableMeta::__set_dbName(const std::string& val) { +void FinalizeWriteIdRequest::__set_dbName(const std::string& val) { this->dbName = val; } -void TableMeta::__set_tableName(const std::string& val) { - this->tableName = val; +void FinalizeWriteIdRequest::__set_tblName(const std::string& val) { + this->tblName = val; } -void TableMeta::__set_tableType(const std::string& val) { - this->tableType = val; +void FinalizeWriteIdRequest::__set_writeId(const int64_t val) { + this->writeId = val; } -void TableMeta::__set_comments(const std::string& val) { - this->comments = val; -__isset.comments = true; +void FinalizeWriteIdRequest::__set_commit(const bool val) { + this->commit = val; } -uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17752,8 +17882,9 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { using ::apache::thrift::protocol::TProtocolException; bool isset_dbName = false; - bool isset_tableName = false; - bool isset_tableType = false; + bool isset_tblName = false; + bool isset_writeId = false; + bool isset_commit = false; while (true) { @@ -17773,24 +17904,24 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tableName); - isset_tableName = true; + xfer += iprot->readString(this->tblName); + isset_tblName = true; } else { xfer += iprot->skip(ftype); } break; case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tableType); - isset_tableType = true; + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; } else { xfer += iprot->skip(ftype); } break; case 4: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->comments); - this->__isset.comments = true; + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->commit); + isset_commit = true; } else { xfer += iprot->skip(ftype); } @@ -17806,84 +17937,78 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { if (!isset_dbName) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_tableName) + if (!isset_tblName) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_tableType) + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_commit) throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } -uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t FinalizeWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("TableMeta"); + xfer += oprot->writeStructBegin("FinalizeWriteIdRequest"); xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tableName", 
::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->tableType); + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("commit", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->commit); xfer += oprot->writeFieldEnd(); - if (this->__isset.comments) { - xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->comments); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(TableMeta &a, TableMeta &b) { +void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b) { using ::std::swap; swap(a.dbName, b.dbName); - swap(a.tableName, b.tableName); - swap(a.tableType, b.tableType); - swap(a.comments, b.comments); - swap(a.__isset, b.__isset); + swap(a.tblName, b.tblName); + swap(a.writeId, b.writeId); + swap(a.commit, b.commit); } -TableMeta::TableMeta(const TableMeta& other747) { - dbName = other747.dbName; - tableName = other747.tableName; - tableType = other747.tableType; - comments = other747.comments; - __isset = other747.__isset; +FinalizeWriteIdRequest::FinalizeWriteIdRequest(const FinalizeWriteIdRequest& other743) { + dbName = other743.dbName; + tblName = other743.tblName; + writeId = other743.writeId; + commit = other743.commit; } -TableMeta& TableMeta::operator=(const TableMeta& other748) { - dbName = other748.dbName; - tableName = other748.tableName; - tableType = other748.tableType; - comments = other748.comments; - __isset = other748.__isset; +FinalizeWriteIdRequest& FinalizeWriteIdRequest::operator=(const FinalizeWriteIdRequest& other744) { + dbName = other744.dbName; + tblName = other744.tblName; + writeId = other744.writeId; + commit = other744.commit; return *this; } -void TableMeta::printTo(std::ostream& out) const { +void FinalizeWriteIdRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "TableMeta("; + out << "FinalizeWriteIdRequest("; out << "dbName=" << to_string(dbName); - out << ", " << "tableName=" << to_string(tableName); - out << ", " << "tableType=" << to_string(tableType); - out << ", " << "comments="; (__isset.comments ? 
(out << to_string(comments)) : (out << "")); + out << ", " << "tblName=" << to_string(tblName); + out << ", " << "writeId=" << to_string(writeId); + out << ", " << "commit=" << to_string(commit); out << ")"; } -MetaException::~MetaException() throw() { +FinalizeWriteIdResult::~FinalizeWriteIdResult() throw() { } -void MetaException::__set_message(const std::string& val) { - this->message = val; -} - -uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t FinalizeWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17902,20 +18027,7 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -17924,63 +18036,601 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { return xfer; } -uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t FinalizeWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("MetaException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("FinalizeWriteIdResult"); xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(MetaException &a, MetaException &b) { +void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b) { using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); + (void) a; + (void) b; } -MetaException::MetaException(const MetaException& other749) : TException() { - message = other749.message; - __isset = other749.__isset; +FinalizeWriteIdResult::FinalizeWriteIdResult(const FinalizeWriteIdResult& other745) { + (void) other745; } -MetaException& MetaException::operator=(const MetaException& other750) { - message = other750.message; - __isset = other750.__isset; +FinalizeWriteIdResult& FinalizeWriteIdResult::operator=(const FinalizeWriteIdResult& other746) { + (void) other746; return *this; } -void MetaException::printTo(std::ostream& out) const { +void FinalizeWriteIdResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "MetaException("; - out << "message=" << to_string(message); + out << "FinalizeWriteIdResult("; out << ")"; } -const char* MetaException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: MetaException"; - } + +HeartbeatWriteIdRequest::~HeartbeatWriteIdRequest() throw() { } -UnknownTableException::~UnknownTableException() throw() { +void HeartbeatWriteIdRequest::__set_dbName(const std::string& val) { + this->dbName = val; } +void HeartbeatWriteIdRequest::__set_tblName(const std::string& val) { + this->tblName = val; +} 
-void UnknownTableException::__set_message(const std::string& val) { - this->message = val; +void HeartbeatWriteIdRequest::__set_writeId(const int64_t val) { + this->writeId = val; } -uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tblName = false; + bool isset_writeId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tblName); + isset_tblName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tblName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t HeartbeatWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("HeartbeatWriteIdRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tblName, b.tblName); + swap(a.writeId, b.writeId); +} + +HeartbeatWriteIdRequest::HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest& other747) { + dbName = other747.dbName; + tblName = other747.tblName; + writeId = other747.writeId; +} +HeartbeatWriteIdRequest& HeartbeatWriteIdRequest::operator=(const HeartbeatWriteIdRequest& other748) { + dbName = other748.dbName; + tblName = other748.tblName; + writeId = other748.writeId; + return *this; +} +void HeartbeatWriteIdRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "HeartbeatWriteIdRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tblName=" << to_string(tblName); + out << ", " << "writeId=" << to_string(writeId); + out << ")"; +} + + 
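+// A minimal client-side sketch of the write-ID lifecycle these structs define,
+// assuming a connected ThriftHiveMetastoreClient named `client` (the generated
+// client mirrors the handler signatures in ThriftHiveMetastore_server.skeleton.cpp).
+// The reserve -> write -> heartbeat -> finalize flow is inferred from the struct
+// names; this patch ships only skeleton handlers, so server-side semantics are
+// not final here:
+//
+//   GetNextWriteIdRequest req;
+//   req.__set_dbName("db");
+//   req.__set_tblName("tbl");
+//   GetNextWriteIdResult res;
+//   client.get_next_write_id(res, req);    // reserve a write ID for an MM table
+//
+//   // ... write data tagged with res.writeId, calling heartbeat_write_id
+//   // periodically so the uncommitted write is not considered abandoned ...
+//
+//   FinalizeWriteIdRequest fin;
+//   fin.__set_dbName("db");
+//   fin.__set_tblName("tbl");
+//   fin.__set_writeId(res.writeId);
+//   fin.__set_commit(true);                // false finalizes the write as aborted
+//   FinalizeWriteIdResult finRes;
+//   client.finalize_write_id(finRes, fin);
+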
+HeartbeatWriteIdResult::~HeartbeatWriteIdResult() throw() {
+}
+
+
+uint32_t HeartbeatWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t HeartbeatWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("HeartbeatWriteIdResult");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b) {
+  using ::std::swap;
+  (void) a;
+  (void) b;
+}
+
+HeartbeatWriteIdResult::HeartbeatWriteIdResult(const HeartbeatWriteIdResult& other749) {
+  (void) other749;
+}
+HeartbeatWriteIdResult& HeartbeatWriteIdResult::operator=(const HeartbeatWriteIdResult& other750) {
+  (void) other750;
+  return *this;
+}
+void HeartbeatWriteIdResult::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "HeartbeatWriteIdResult(";
+  out << ")";
+}
+
+
+GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
+}
+
+
+void GetAllFunctionsResponse::__set_functions(const std::vector<Function> & val) {
+  this->functions = val;
+__isset.functions = true;
+}
+
+uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->functions.clear();
+            uint32_t _size751;
+            ::apache::thrift::protocol::TType _etype754;
+            xfer += iprot->readListBegin(_etype754, _size751);
+            this->functions.resize(_size751);
+            uint32_t _i755;
+            for (_i755 = 0; _i755 < _size751; ++_i755)
+            {
+              xfer += this->functions[_i755].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.functions = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("GetAllFunctionsResponse");
+
+  if (this->__isset.functions) {
+    xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
+      std::vector<Function> ::const_iterator _iter756;
+      for (_iter756 = this->functions.begin(); _iter756 != this->functions.end(); ++_iter756)
+      {
+        xfer +=
(*_iter756).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { + using ::std::swap; + swap(a.functions, b.functions); + swap(a.__isset, b.__isset); +} + +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other757) { + functions = other757.functions; + __isset = other757.__isset; +} +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other758) { + functions = other758.functions; + __isset = other758.__isset; + return *this; +} +void GetAllFunctionsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetAllFunctionsResponse("; + out << "functions="; (__isset.functions ? (out << to_string(functions)) : (out << "")); + out << ")"; +} + + +TableMeta::~TableMeta() throw() { +} + + +void TableMeta::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void TableMeta::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void TableMeta::__set_tableType(const std::string& val) { + this->tableType = val; +} + +void TableMeta::__set_comments(const std::string& val) { + this->comments = val; +__isset.comments = true; +} + +uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tableName = false; + bool isset_tableType = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableType); + isset_tableType = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->comments); + this->__isset.comments = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableType) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TableMeta"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += 
oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tableType); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.comments) { + xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->comments); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TableMeta &a, TableMeta &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); + swap(a.tableType, b.tableType); + swap(a.comments, b.comments); + swap(a.__isset, b.__isset); +} + +TableMeta::TableMeta(const TableMeta& other759) { + dbName = other759.dbName; + tableName = other759.tableName; + tableType = other759.tableType; + comments = other759.comments; + __isset = other759.__isset; +} +TableMeta& TableMeta::operator=(const TableMeta& other760) { + dbName = other760.dbName; + tableName = other760.tableName; + tableType = other760.tableType; + comments = other760.comments; + __isset = other760.__isset; + return *this; +} +void TableMeta::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TableMeta("; + out << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ", " << "tableType=" << to_string(tableType); + out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "")); + out << ")"; +} + + +MetaException::~MetaException() throw() { +} + + +void MetaException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("MetaException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(MetaException &a, MetaException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +MetaException::MetaException(const MetaException& other761) : TException() { + message = other761.message; + __isset = other761.__isset; +} +MetaException& MetaException::operator=(const MetaException& other762) { + message = other762.message; + __isset = 
other762.__isset; + return *this; +} +void MetaException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "MetaException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* MetaException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: MetaException"; + } +} + + +UnknownTableException::~UnknownTableException() throw() { +} + + +void UnknownTableException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18041,13 +18691,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other751) : TException() { - message = other751.message; - __isset = other751.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other763) : TException() { + message = other763.message; + __isset = other763.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other752) { - message = other752.message; - __isset = other752.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other764) { + message = other764.message; + __isset = other764.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -18138,13 +18788,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other753) : TException() { - message = other753.message; - __isset = other753.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other765) : TException() { + message = other765.message; + __isset = other765.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other754) { - message = other754.message; - __isset = other754.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other766) { + message = other766.message; + __isset = other766.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -18235,13 +18885,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other755) : TException() { - message = other755.message; - __isset = other755.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other767) : TException() { + message = other767.message; + __isset = other767.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other756) { - message = other756.message; - __isset = other756.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other768) { + message = other768.message; + __isset = other768.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -18332,13 +18982,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } 
-InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other757) : TException() { - message = other757.message; - __isset = other757.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other769) : TException() { + message = other769.message; + __isset = other769.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other758) { - message = other758.message; - __isset = other758.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other770) { + message = other770.message; + __isset = other770.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -18429,13 +19079,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other759) : TException() { - message = other759.message; - __isset = other759.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other771) : TException() { + message = other771.message; + __isset = other771.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other760) { - message = other760.message; - __isset = other760.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other772) { + message = other772.message; + __isset = other772.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -18526,13 +19176,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other761) : TException() { - message = other761.message; - __isset = other761.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other773) : TException() { + message = other773.message; + __isset = other773.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other762) { - message = other762.message; - __isset = other762.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other774) { + message = other774.message; + __isset = other774.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -18623,13 +19273,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other763) : TException() { - message = other763.message; - __isset = other763.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other775) : TException() { + message = other775.message; + __isset = other775.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other764) { - message = other764.message; - __isset = other764.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other776) { + message = other776.message; + __isset = other776.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -18720,13 +19370,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } 
-IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other765) : TException() { - message = other765.message; - __isset = other765.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other777) : TException() { + message = other777.message; + __isset = other777.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other766) { - message = other766.message; - __isset = other766.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other778) { + message = other778.message; + __isset = other778.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -18817,13 +19467,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other767) : TException() { - message = other767.message; - __isset = other767.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other779) : TException() { + message = other779.message; + __isset = other779.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other768) { - message = other768.message; - __isset = other768.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other780) { + message = other780.message; + __isset = other780.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -18914,13 +19564,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other769) : TException() { - message = other769.message; - __isset = other769.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other781) : TException() { + message = other781.message; + __isset = other781.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other770) { - message = other770.message; - __isset = other770.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other782) { + message = other782.message; + __isset = other782.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -19011,13 +19661,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other771) : TException() { - message = other771.message; - __isset = other771.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other783) : TException() { + message = other783.message; + __isset = other783.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other772) { - message = other772.message; - __isset = other772.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other784) { + message = other784.message; + __isset = other784.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -19108,13 +19758,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { 
swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other773) : TException() { - message = other773.message; - __isset = other773.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other785) : TException() { + message = other785.message; + __isset = other785.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other774) { - message = other774.message; - __isset = other774.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other786) { + message = other786.message; + __isset = other786.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -19205,13 +19855,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other775) : TException() { - message = other775.message; - __isset = other775.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other787) : TException() { + message = other787.message; + __isset = other787.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other776) { - message = other776.message; - __isset = other776.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other788) { + message = other788.message; + __isset = other788.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -19302,13 +19952,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other777) : TException() { - message = other777.message; - __isset = other777.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other789) : TException() { + message = other789.message; + __isset = other789.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other778) { - message = other778.message; - __isset = other778.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other790) { + message = other790.message; + __isset = other790.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -19399,13 +20049,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other779) : TException() { - message = other779.message; - __isset = other779.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other791) : TException() { + message = other791.message; + __isset = other791.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other780) { - message = other780.message; - __isset = other780.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other792) { + message = other792.message; + __isset = other792.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index bfec69462bf3..beddd4cb3eec 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -384,6 +384,18 @@ class CacheFileMetadataResult; class CacheFileMetadataRequest; +class GetNextWriteIdRequest; + +class GetNextWriteIdResult; + +class 
FinalizeWriteIdRequest; + +class FinalizeWriteIdResult; + +class HeartbeatWriteIdRequest; + +class HeartbeatWriteIdResult; + class GetAllFunctionsResponse; class TableMeta; @@ -2042,7 +2054,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), mmNextWriteId(false), mmWatermarkWriteId(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2057,6 +2069,8 @@ typedef struct _Table__isset { bool tableType :1; bool privileges :1; bool temporary :1; + bool mmNextWriteId :1; + bool mmWatermarkWriteId :1; } _Table__isset; class Table { @@ -2064,7 +2078,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) { + Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), mmNextWriteId(0), mmWatermarkWriteId(0) { } virtual ~Table() throw(); @@ -2082,6 +2096,8 @@ class Table { std::string tableType; PrincipalPrivilegeSet privileges; bool temporary; + int64_t mmNextWriteId; + int64_t mmWatermarkWriteId; _Table__isset __isset; @@ -2113,6 +2129,10 @@ class Table { void __set_temporary(const bool val); + void __set_mmNextWriteId(const int64_t val); + + void __set_mmWatermarkWriteId(const int64_t val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -2147,6 +2167,14 @@ class Table { return false; else if (__isset.temporary && !(temporary == rhs.temporary)) return false; + if (__isset.mmNextWriteId != rhs.__isset.mmNextWriteId) + return false; + else if (__isset.mmNextWriteId && !(mmNextWriteId == rhs.mmNextWriteId)) + return false; + if (__isset.mmWatermarkWriteId != rhs.__isset.mmWatermarkWriteId) + return false; + else if (__isset.mmWatermarkWriteId && !(mmWatermarkWriteId == rhs.mmWatermarkWriteId)) + return false; return true; } bool operator != (const Table &rhs) const { @@ -7157,6 +7185,266 @@ inline std::ostream& operator<<(std::ostream& out, const CacheFileMetadataReques return out; } + +class GetNextWriteIdRequest { + public: + + GetNextWriteIdRequest(const GetNextWriteIdRequest&); + GetNextWriteIdRequest& operator=(const GetNextWriteIdRequest&); + GetNextWriteIdRequest() : dbName(), tblName() { + } + + virtual ~GetNextWriteIdRequest() throw(); + std::string dbName; + std::string tblName; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + bool operator == (const GetNextWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + return true; + } + bool operator != (const GetNextWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetNextWriteIdRequest & ) const; + + uint32_t 
read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class GetNextWriteIdResult { + public: + + GetNextWriteIdResult(const GetNextWriteIdResult&); + GetNextWriteIdResult& operator=(const GetNextWriteIdResult&); + GetNextWriteIdResult() : writeId(0) { + } + + virtual ~GetNextWriteIdResult() throw(); + int64_t writeId; + + void __set_writeId(const int64_t val); + + bool operator == (const GetNextWriteIdResult & rhs) const + { + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const GetNextWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetNextWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdResult& obj) +{ + obj.printTo(out); + return out; +} + + +class FinalizeWriteIdRequest { + public: + + FinalizeWriteIdRequest(const FinalizeWriteIdRequest&); + FinalizeWriteIdRequest& operator=(const FinalizeWriteIdRequest&); + FinalizeWriteIdRequest() : dbName(), tblName(), writeId(0), commit(0) { + } + + virtual ~FinalizeWriteIdRequest() throw(); + std::string dbName; + std::string tblName; + int64_t writeId; + bool commit; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + void __set_writeId(const int64_t val); + + void __set_commit(const bool val); + + bool operator == (const FinalizeWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + if (!(writeId == rhs.writeId)) + return false; + if (!(commit == rhs.commit)) + return false; + return true; + } + bool operator != (const FinalizeWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const FinalizeWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class FinalizeWriteIdResult { + public: + + FinalizeWriteIdResult(const FinalizeWriteIdResult&); + FinalizeWriteIdResult& operator=(const FinalizeWriteIdResult&); + FinalizeWriteIdResult() { + } + + virtual ~FinalizeWriteIdResult() throw(); + + bool operator == (const FinalizeWriteIdResult & /* rhs */) const + { + return true; + } + bool operator != (const FinalizeWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const FinalizeWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdResult& 
obj) +{ + obj.printTo(out); + return out; +} + + +class HeartbeatWriteIdRequest { + public: + + HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest&); + HeartbeatWriteIdRequest& operator=(const HeartbeatWriteIdRequest&); + HeartbeatWriteIdRequest() : dbName(), tblName(), writeId(0) { + } + + virtual ~HeartbeatWriteIdRequest() throw(); + std::string dbName; + std::string tblName; + int64_t writeId; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + void __set_writeId(const int64_t val); + + bool operator == (const HeartbeatWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const HeartbeatWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HeartbeatWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class HeartbeatWriteIdResult { + public: + + HeartbeatWriteIdResult(const HeartbeatWriteIdResult&); + HeartbeatWriteIdResult& operator=(const HeartbeatWriteIdResult&); + HeartbeatWriteIdResult() { + } + + virtual ~HeartbeatWriteIdResult() throw(); + + bool operator == (const HeartbeatWriteIdResult & /* rhs */) const + { + return true; + } + bool operator != (const HeartbeatWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HeartbeatWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdResult& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _GetAllFunctionsResponse__isset { _GetAllFunctionsResponse__isset() : functions(false) {} bool functions :1; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java new file mode 100644 index 000000000000..f47460228128 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java @@ -0,0 +1,684 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import 
java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class FinalizeWriteIdRequest implements org.apache.thrift.TBase<FinalizeWriteIdRequest, FinalizeWriteIdRequest._Fields>, java.io.Serializable, Cloneable, Comparable<FinalizeWriteIdRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField COMMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("commit", org.apache.thrift.protocol.TType.BOOL, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FinalizeWriteIdRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FinalizeWriteIdRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private long writeId; // required
+  private boolean commit; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    WRITE_ID((short)3, "writeId"),
+    COMMIT((short)4, "commit");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // WRITE_ID
+          return WRITE_ID;
+        case 4: // COMMIT
+          return COMMIT;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private static final int __COMMIT_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.COMMIT, new org.apache.thrift.meta_data.FieldMetaData("commit", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdRequest.class, metaDataMap); + } + + public FinalizeWriteIdRequest() { + } + + public FinalizeWriteIdRequest( + String dbName, + String tblName, + long writeId, + boolean commit) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + this.writeId = writeId; + setWriteIdIsSet(true); + this.commit = commit; + setCommitIsSet(true); + } + + /** + * Performs a deep copy on other. 
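+   * (Added note: string fields are shared rather than re-allocated — safe because Java strings are immutable — while primitives and the isset bitfield are copied by value, as the copy constructor below shows.)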
+ */ + public FinalizeWriteIdRequest(FinalizeWriteIdRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + this.writeId = other.writeId; + this.commit = other.commit; + } + + public FinalizeWriteIdRequest deepCopy() { + return new FinalizeWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + setWriteIdIsSet(false); + this.writeId = 0; + setCommitIsSet(false); + this.commit = false; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public boolean isCommit() { + return this.commit; + } + + public void setCommit(boolean commit) { + this.commit = commit; + setCommitIsSet(true); + } + + public void unsetCommit() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COMMIT_ISSET_ID); + } + + /** Returns true if field commit is set (has been assigned a value) and false otherwise */ + public boolean isSetCommit() { + return EncodingUtils.testBit(__isset_bitfield, __COMMIT_ISSET_ID); + } + + public void setCommitIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COMMIT_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case COMMIT: + if (value == null) { + unsetCommit(); + } else { + setCommit((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + case WRITE_ID: + return getWriteId(); + + case COMMIT: + return isCommit(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field 
corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + case WRITE_ID: + return isSetWriteId(); + case COMMIT: + return isSetCommit(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof FinalizeWriteIdRequest) + return this.equals((FinalizeWriteIdRequest)that); + return false; + } + + public boolean equals(FinalizeWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_commit = true; + boolean that_present_commit = true; + if (this_present_commit || that_present_commit) { + if (!(this_present_commit && that_present_commit)) + return false; + if (this.commit != that.commit) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_commit = true; + list.add(present_commit); + if (present_commit) + list.add(commit); + + return list.hashCode(); + } + + @Override + public int compareTo(FinalizeWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if 
(lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCommit()).compareTo(other.isSetCommit()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCommit()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.commit, other.commit); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("FinalizeWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + if (!first) sb.append(", "); + sb.append("commit:"); + sb.append(this.commit); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + if (!isSetCommit()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'commit' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
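+      // Concretely: java.io deserialization never runs the constructor, so the isset
+      // bits for the primitive fields (writeId, commit) are cleared here and then
+      // re-established by the read() call below as each field arrives off the wire.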
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class FinalizeWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public FinalizeWriteIdRequestStandardScheme getScheme() { + return new FinalizeWriteIdRequestStandardScheme(); + } + } + + private static class FinalizeWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // COMMIT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.commit = iprot.readBool(); + struct.setCommitIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(COMMIT_FIELD_DESC); + oprot.writeBool(struct.commit); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class FinalizeWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public FinalizeWriteIdRequestTupleScheme getScheme() { + return new FinalizeWriteIdRequestTupleScheme(); + } + } + + private static class FinalizeWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + oprot.writeI64(struct.writeId); + oprot.writeBool(struct.commit); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, 
FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + struct.commit = iprot.readBool(); + struct.setCommitIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java new file mode 100644 index 000000000000..8e8b504b9aac --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class FinalizeWriteIdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdResult"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new FinalizeWriteIdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new FinalizeWriteIdResultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
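+   * (Added note: FinalizeWriteIdResult carries no payload, so the _Fields enum above is empty — findByThriftId and findByName always return null, and findByThriftIdOrThrow always throws.)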
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdResult.class, metaDataMap); + } + + public FinalizeWriteIdResult() { + } + + /** + * Performs a deep copy on other. + */ + public FinalizeWriteIdResult(FinalizeWriteIdResult other) { + } + + public FinalizeWriteIdResult deepCopy() { + return new FinalizeWriteIdResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof FinalizeWriteIdResult) + return this.equals((FinalizeWriteIdResult)that); + return false; + } + + public boolean equals(FinalizeWriteIdResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(FinalizeWriteIdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("FinalizeWriteIdResult("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException 
te) { + throw new java.io.IOException(te); + } + } + + private static class FinalizeWriteIdResultStandardSchemeFactory implements SchemeFactory { + public FinalizeWriteIdResultStandardScheme getScheme() { + return new FinalizeWriteIdResultStandardScheme(); + } + } + + private static class FinalizeWriteIdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class FinalizeWriteIdResultTupleSchemeFactory implements SchemeFactory { + public FinalizeWriteIdResultTupleScheme getScheme() { + return new FinalizeWriteIdResultTupleScheme(); + } + } + + private static class FinalizeWriteIdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java new file mode 100644 index 000000000000..dab13fded3a7 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java @@ -0,0 +1,490 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class GetNextWriteIdRequest implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetNextWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetNextWriteIdRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdRequest.class, metaDataMap); + } + + public GetNextWriteIdRequest() { + } + + public GetNextWriteIdRequest( + String dbName, + String tblName) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetNextWriteIdRequest(GetNextWriteIdRequest other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + } + + public GetNextWriteIdRequest deepCopy() { + return new GetNextWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetNextWriteIdRequest) + return this.equals((GetNextWriteIdRequest)that); + return false; + } + + public boolean equals(GetNextWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + return list.hashCode(); + } + + @Override + public int compareTo(GetNextWriteIdRequest other) { + if (!getClass().equals(other.getClass())) 
{ + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetNextWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetNextWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public GetNextWriteIdRequestStandardScheme getScheme() { + return new GetNextWriteIdRequestStandardScheme(); + } + } + + private static class GetNextWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetNextWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public GetNextWriteIdRequestTupleScheme getScheme() { + return new GetNextWriteIdRequestTupleScheme(); + } + } + + private static class GetNextWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } + } + +} + diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java new file mode 100644 index 000000000000..97ad28421c25 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java @@ -0,0 +1,387 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class GetNextWriteIdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdResult"); + + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetNextWriteIdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetNextWriteIdResultTupleSchemeFactory()); + } + + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + WRITE_ID((short)1, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdResult.class, metaDataMap); + } + + public GetNextWriteIdResult() { + } + + public GetNextWriteIdResult( + long writeId) + { + this(); + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetNextWriteIdResult(GetNextWriteIdResult other) { + __isset_bitfield = other.__isset_bitfield; + this.writeId = other.writeId; + } + + public GetNextWriteIdResult deepCopy() { + return new GetNextWriteIdResult(this); + } + + @Override + public void clear() { + setWriteIdIsSet(false); + this.writeId = 0; + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetNextWriteIdResult) + return this.equals((GetNextWriteIdResult)that); + return false; + } + + public boolean equals(GetNextWriteIdResult that) { + if (that == null) + return false; + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + 
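+  // Illustrative sketch (added note, not Thrift generator output): like the other
+  // write-id beans in this patch, this struct round-trips through any TProtocol;
+  // for example, with libthrift's TSerializer/TDeserializer:
+  //
+  //   byte[] bytes = new TSerializer(new TCompactProtocol.Factory())
+  //       .serialize(new GetNextWriteIdResult(42L));
+  //   GetNextWriteIdResult copy = new GetNextWriteIdResult();
+  //   new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
+  //   assert copy.getWriteId() == 42L;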
@Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_writeId = true;
+    list.add(present_writeId);
+    if (present_writeId)
+      list.add(writeId);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetNextWriteIdResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetWriteId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetNextWriteIdResult(");
+    boolean first = true;
+
+    sb.append("writeId:");
+    sb.append(this.writeId);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetWriteId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
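+      // (Added note: readObject/writeObject delegate to TCompactProtocol over a
+      // TIOStreamTransport wrapper, so java.io serialization reuses Thrift's compact
+      // wire encoding instead of default field-by-field reflection.)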
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetNextWriteIdResultStandardSchemeFactory implements SchemeFactory { + public GetNextWriteIdResultStandardScheme getScheme() { + return new GetNextWriteIdResultStandardScheme(); + } + } + + private static class GetNextWriteIdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetNextWriteIdResultTupleSchemeFactory implements SchemeFactory { + public GetNextWriteIdResultTupleScheme getScheme() { + return new GetNextWriteIdResultTupleScheme(); + } + } + + private static class GetNextWriteIdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java new file mode 100644 index 000000000000..0c1849c5f2c9 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java @@ -0,0 +1,589 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import 
org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class HeartbeatWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new HeartbeatWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new HeartbeatWriteIdRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"), + WRITE_ID((short)3, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdRequest.class, metaDataMap); + } + + public HeartbeatWriteIdRequest() { + } + + public HeartbeatWriteIdRequest( + String dbName, + String tblName, + long writeId) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public HeartbeatWriteIdRequest(HeartbeatWriteIdRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + this.writeId = other.writeId; + } + + public HeartbeatWriteIdRequest deepCopy() { + return new HeartbeatWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + setWriteIdIsSet(false); + this.writeId = 0; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HeartbeatWriteIdRequest) + return this.equals((HeartbeatWriteIdRequest)that); + return false; + } + + public boolean equals(HeartbeatWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || 
that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(HeartbeatWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("HeartbeatWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new 
org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetWriteId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HeartbeatWriteIdRequestStandardSchemeFactory implements SchemeFactory {
+    public HeartbeatWriteIdRequestStandardScheme getScheme() {
+      return new HeartbeatWriteIdRequestStandardScheme();
+    }
+  }
+
+  private static class HeartbeatWriteIdRequestStandardScheme extends StandardScheme<HeartbeatWriteIdRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // WRITE_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.writeId = iprot.readI64();
+              struct.setWriteIdIsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
+      oprot.writeI64(struct.writeId);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class
HeartbeatWriteIdRequestTupleSchemeFactory implements SchemeFactory {
+    public HeartbeatWriteIdRequestTupleScheme getScheme() {
+      return new HeartbeatWriteIdRequestTupleScheme();
+    }
+  }
+
+  private static class HeartbeatWriteIdRequestTupleScheme extends TupleScheme<HeartbeatWriteIdRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      oprot.writeI64(struct.writeId);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      struct.writeId = iprot.readI64();
+      struct.setWriteIdIsSet(true);
+    }
+  }
+
+}
+
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java
new file mode 100644
index 000000000000..ae6f25e815b0
--- /dev/null
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HeartbeatWriteIdResult implements org.apache.thrift.TBase<HeartbeatWriteIdResult, HeartbeatWriteIdResult._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatWriteIdResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdResult");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HeartbeatWriteIdResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HeartbeatWriteIdResultTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdResult.class, metaDataMap); + } + + public HeartbeatWriteIdResult() { + } + + /** + * Performs a deep copy on other. + */ + public HeartbeatWriteIdResult(HeartbeatWriteIdResult other) { + } + + public HeartbeatWriteIdResult deepCopy() { + return new HeartbeatWriteIdResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HeartbeatWriteIdResult) + return this.equals((HeartbeatWriteIdResult)that); + return false; + } + + public boolean equals(HeartbeatWriteIdResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(HeartbeatWriteIdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + 
public String toString() {
+    StringBuilder sb = new StringBuilder("HeartbeatWriteIdResult(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HeartbeatWriteIdResultStandardSchemeFactory implements SchemeFactory {
+    public HeartbeatWriteIdResultStandardScheme getScheme() {
+      return new HeartbeatWriteIdResultStandardScheme();
+    }
+  }
+
+  private static class HeartbeatWriteIdResultStandardScheme extends StandardScheme<HeartbeatWriteIdResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HeartbeatWriteIdResultTupleSchemeFactory implements SchemeFactory {
+    public HeartbeatWriteIdResultTupleScheme getScheme() {
+      return new HeartbeatWriteIdResultTupleScheme();
+    }
+  }
+
+  private static class HeartbeatWriteIdResultTupleScheme extends TupleScheme<HeartbeatWriteIdResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 5d683fb615e2..9c228c749e34 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@ -52,6 +52,8 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
   private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12);
   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13);
   private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField MM_NEXT_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmNextWriteId", org.apache.thrift.protocol.TType.I64, (short)15);
+  private static final org.apache.thrift.protocol.TField MM_WATERMARK_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmWatermarkWriteId", org.apache.thrift.protocol.TType.I64, (short)16);
 
   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
   static {
@@ -73,6 +75,8 @@ public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, jav
   private String tableType; // required
   private PrincipalPrivilegeSet privileges; // optional
   private boolean temporary; // optional
+  private long mmNextWriteId; // optional
+  private long mmWatermarkWriteId; // optional
 
   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -89,7 +93,9 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
     VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"),
     TABLE_TYPE((short)12, "tableType"),
     PRIVILEGES((short)13, "privileges"),
-    TEMPORARY((short)14, "temporary");
+    TEMPORARY((short)14, "temporary"),
+    MM_NEXT_WRITE_ID((short)15, "mmNextWriteId"),
+    MM_WATERMARK_WRITE_ID((short)16, "mmWatermarkWriteId");
 
     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -132,6 +138,10 @@ public static _Fields findByThriftId(int fieldId) {
           return PRIVILEGES;
         case 14: // TEMPORARY
           return TEMPORARY;
+        case 15: // MM_NEXT_WRITE_ID
+          return MM_NEXT_WRITE_ID;
+        case 16: // MM_WATERMARK_WRITE_ID
+          return MM_WATERMARK_WRITE_ID;
         default:
           return null;
       }
@@ -176,8 +186,10 @@ public String getFieldName() {
   private static final int __LASTACCESSTIME_ISSET_ID = 1;
   private static final int __RETENTION_ISSET_ID = 2;
   private static final int __TEMPORARY_ISSET_ID = 3;
+  private static final int __MMNEXTWRITEID_ISSET_ID = 4;
+  private static final int __MMWATERMARKWRITEID_ISSET_ID = 5;
   private byte __isset_bitfield = 0;
-  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY};
+  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.MM_NEXT_WRITE_ID,_Fields.MM_WATERMARK_WRITE_ID};
   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
   static {
     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
@@ -212,6 +224,10 @@ public String getFieldName() {
         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
     tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL,
         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.MM_NEXT_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmNextWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.MM_WATERMARK_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmWatermarkWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL,
+
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -297,6 +313,8 @@ public Table(Table other) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } this.temporary = other.temporary; + this.mmNextWriteId = other.mmNextWriteId; + this.mmWatermarkWriteId = other.mmWatermarkWriteId; } public Table deepCopy() { @@ -323,6 +341,10 @@ public void clear() { this.privileges = null; this.temporary = false; + setMmNextWriteIdIsSet(false); + this.mmNextWriteId = 0; + setMmWatermarkWriteIdIsSet(false); + this.mmWatermarkWriteId = 0; } public String getTableName() { @@ -669,6 +691,50 @@ public void setTemporaryIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value); } + public long getMmNextWriteId() { + return this.mmNextWriteId; + } + + public void setMmNextWriteId(long mmNextWriteId) { + this.mmNextWriteId = mmNextWriteId; + setMmNextWriteIdIsSet(true); + } + + public void unsetMmNextWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID); + } + + /** Returns true if field mmNextWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMmNextWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID); + } + + public void setMmNextWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID, value); + } + + public long getMmWatermarkWriteId() { + return this.mmWatermarkWriteId; + } + + public void setMmWatermarkWriteId(long mmWatermarkWriteId) { + this.mmWatermarkWriteId = mmWatermarkWriteId; + setMmWatermarkWriteIdIsSet(true); + } + + public void unsetMmWatermarkWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID); + } + + /** Returns true if field mmWatermarkWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMmWatermarkWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID); + } + + public void setMmWatermarkWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -783,6 +849,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case MM_NEXT_WRITE_ID: + if (value == null) { + unsetMmNextWriteId(); + } else { + setMmNextWriteId((Long)value); + } + break; + + case MM_WATERMARK_WRITE_ID: + if (value == null) { + unsetMmWatermarkWriteId(); + } else { + setMmWatermarkWriteId((Long)value); + } + break; + } } @@ -830,6 +912,12 @@ public Object getFieldValue(_Fields field) { case TEMPORARY: return isTemporary(); + case MM_NEXT_WRITE_ID: + return getMmNextWriteId(); + + case MM_WATERMARK_WRITE_ID: + return getMmWatermarkWriteId(); + } throw new IllegalStateException(); } @@ -869,6 +957,10 @@ public boolean isSet(_Fields field) { return isSetPrivileges(); case TEMPORARY: return isSetTemporary(); + case MM_NEXT_WRITE_ID: + return isSetMmNextWriteId(); + case MM_WATERMARK_WRITE_ID: + return isSetMmWatermarkWriteId(); } throw new IllegalStateException(); } @@ -1012,6 +1104,24 @@ public boolean equals(Table that) { return false; } + boolean this_present_mmNextWriteId = true && 
this.isSetMmNextWriteId(); + boolean that_present_mmNextWriteId = true && that.isSetMmNextWriteId(); + if (this_present_mmNextWriteId || that_present_mmNextWriteId) { + if (!(this_present_mmNextWriteId && that_present_mmNextWriteId)) + return false; + if (this.mmNextWriteId != that.mmNextWriteId) + return false; + } + + boolean this_present_mmWatermarkWriteId = true && this.isSetMmWatermarkWriteId(); + boolean that_present_mmWatermarkWriteId = true && that.isSetMmWatermarkWriteId(); + if (this_present_mmWatermarkWriteId || that_present_mmWatermarkWriteId) { + if (!(this_present_mmWatermarkWriteId && that_present_mmWatermarkWriteId)) + return false; + if (this.mmWatermarkWriteId != that.mmWatermarkWriteId) + return false; + } + return true; } @@ -1089,6 +1199,16 @@ public int hashCode() { if (present_temporary) list.add(temporary); + boolean present_mmNextWriteId = true && (isSetMmNextWriteId()); + list.add(present_mmNextWriteId); + if (present_mmNextWriteId) + list.add(mmNextWriteId); + + boolean present_mmWatermarkWriteId = true && (isSetMmWatermarkWriteId()); + list.add(present_mmWatermarkWriteId); + if (present_mmWatermarkWriteId) + list.add(mmWatermarkWriteId); + return list.hashCode(); } @@ -1240,6 +1360,26 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetMmNextWriteId()).compareTo(other.isSetMmNextWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMmNextWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmNextWriteId, other.mmNextWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMmWatermarkWriteId()).compareTo(other.isSetMmWatermarkWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMmWatermarkWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmWatermarkWriteId, other.mmWatermarkWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1359,6 +1499,18 @@ public String toString() { sb.append(this.temporary); first = false; } + if (isSetMmNextWriteId()) { + if (!first) sb.append(", "); + sb.append("mmNextWriteId:"); + sb.append(this.mmNextWriteId); + first = false; + } + if (isSetMmWatermarkWriteId()) { + if (!first) sb.append(", "); + sb.append("mmWatermarkWriteId:"); + sb.append(this.mmWatermarkWriteId); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1547,6 +1699,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // MM_NEXT_WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.mmNextWriteId = iprot.readI64(); + struct.setMmNextWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 16: // MM_WATERMARK_WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.mmWatermarkWriteId = iprot.readI64(); + struct.setMmWatermarkWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1641,6 +1809,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeBool(struct.temporary); oprot.writeFieldEnd(); } + if (struct.isSetMmNextWriteId()) { + 
oprot.writeFieldBegin(MM_NEXT_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.mmNextWriteId); + oprot.writeFieldEnd(); + } + if (struct.isSetMmWatermarkWriteId()) { + oprot.writeFieldBegin(MM_WATERMARK_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.mmWatermarkWriteId); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1701,7 +1879,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetMmNextWriteId()) { + optionals.set(14); + } + if (struct.isSetMmWatermarkWriteId()) { + optionals.set(15); + } + oprot.writeBitSet(optionals, 16); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1757,12 +1941,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { oprot.writeBool(struct.temporary); } + if (struct.isSetMmNextWriteId()) { + oprot.writeI64(struct.mmNextWriteId); + } + if (struct.isSetMmWatermarkWriteId()) { + oprot.writeI64(struct.mmWatermarkWriteId); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(16); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1842,6 +2032,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.temporary = iprot.readBool(); struct.setTemporaryIsSet(true); } + if (incoming.get(14)) { + struct.mmNextWriteId = iprot.readI64(); + struct.setMmNextWriteIdIsSet(true); + } + if (incoming.get(15)) { + struct.mmWatermarkWriteId = iprot.readI64(); + struct.setMmWatermarkWriteIdIsSet(true); + } } } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index cb5dec978396..40907b39600e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -340,6 +340,12 @@ public interface Iface extends com.facebook.fb303.FacebookService.Iface { public CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req) throws org.apache.thrift.TException; + public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException; + + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException; + + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException; + } public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -642,6 +648,12 @@ public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIfa public void cache_file_metadata(CacheFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException; + + public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -4963,6 +4975,75 @@ public CacheFileMetadataResult recv_cache_file_metadata() throws org.apache.thri throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result"); } + public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException + { + send_get_next_write_id(req); + return recv_get_next_write_id(); + } + + public void send_get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException + { + get_next_write_id_args args = new get_next_write_id_args(); + args.setReq(req); + sendBase("get_next_write_id", args); + } + + public GetNextWriteIdResult recv_get_next_write_id() throws org.apache.thrift.TException + { + get_next_write_id_result result = new get_next_write_id_result(); + receiveBase(result, "get_next_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result"); + } + + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException + { + send_finalize_write_id(req); + return recv_finalize_write_id(); + } + + public void send_finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException + { + finalize_write_id_args args = new finalize_write_id_args(); + args.setReq(req); + sendBase("finalize_write_id", args); + } + + public FinalizeWriteIdResult recv_finalize_write_id() throws org.apache.thrift.TException + { + finalize_write_id_result result = new finalize_write_id_result(); + receiveBase(result, "finalize_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result"); + } + + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException + { + send_heartbeat_write_id(req); + return recv_heartbeat_write_id(); + } + + public void send_heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException + { + heartbeat_write_id_args args = new heartbeat_write_id_args(); + args.setReq(req); + sendBase("heartbeat_write_id", args); + } + + public HeartbeatWriteIdResult recv_heartbeat_write_id() throws org.apache.thrift.TException + { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + receiveBase(result, "heartbeat_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result"); + } + } public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -10232,6 +10313,102 @@ public CacheFileMetadataResult getResult() throws org.apache.thrift.TException { } } + public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws 
org.apache.thrift.TException { + checkReady(); + get_next_write_id_call method_call = new get_next_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_next_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetNextWriteIdRequest req; + public get_next_write_id_call(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_next_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_next_write_id_args args = new get_next_write_id_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetNextWriteIdResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_next_write_id(); + } + } + + public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + finalize_write_id_call method_call = new finalize_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class finalize_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private FinalizeWriteIdRequest req; + public finalize_write_id_call(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("finalize_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + finalize_write_id_args args = new finalize_write_id_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public FinalizeWriteIdResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new 
Client(prot)).recv_finalize_write_id();
+      }
+    }
+
+    public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      heartbeat_write_id_call method_call = new heartbeat_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class heartbeat_write_id_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private HeartbeatWriteIdRequest req;
+      public heartbeat_write_id_call(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.req = req;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("heartbeat_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        heartbeat_write_id_args args = new heartbeat_write_id_args();
+        args.setReq(req);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public HeartbeatWriteIdResult getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_heartbeat_write_id();
+      }
+    }
+  }
 
   public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor {
@@ -10394,6 +10571,9 @@ protected Processor(I iface, Map
+      processMap.put("get_next_write_id", new get_next_write_id());
+      processMap.put("finalize_write_id", new finalize_write_id());
+      processMap.put("heartbeat_write_id", new heartbeat_write_id());
+    public static class get_next_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_next_write_id_args> {
+      public get_next_write_id() {
+        super("get_next_write_id");
+      }
+
+      public get_next_write_id_args getEmptyArgsInstance() {
+        return new get_next_write_id_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public get_next_write_id_result getResult(I iface, get_next_write_id_args args) throws org.apache.thrift.TException {
+        get_next_write_id_result result = new get_next_write_id_result();
+        result.success = iface.get_next_write_id(args.req);
+        return result;
+      }
+    }
+
+    public static class finalize_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, finalize_write_id_args> {
+      public finalize_write_id() {
+        super("finalize_write_id");
+      }
+
+      public finalize_write_id_args getEmptyArgsInstance() {
+        return new finalize_write_id_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public finalize_write_id_result getResult(I iface, finalize_write_id_args args) throws org.apache.thrift.TException {
+        finalize_write_id_result result = new finalize_write_id_result();
+        result.success = iface.finalize_write_id(args.req);
+        return result;
+      }
+    }
+
+    public static class heartbeat_write_id<I extends Iface> extends org.apache.thrift.ProcessFunction<I, heartbeat_write_id_args> {
+      public heartbeat_write_id() {
+        super("heartbeat_write_id");
+      }
+
+      public heartbeat_write_id_args getEmptyArgsInstance() {
+        return new heartbeat_write_id_args();
+      }
+
+      protected boolean isOneway() {
+        return false;
+      }
+
+      public
heartbeat_write_id_result getResult(I iface, heartbeat_write_id_args args) throws org.apache.thrift.TException { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + result.success = iface.heartbeat_write_id(args.req); + return result; + } + } + } public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -14365,6 +14605,9 @@ protected AsyncProcessor(I iface, Map, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - - private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); - - private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); - static { - schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); - } - - private String key; // required - - /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ - public enum _Fields implements org.apache.thrift.TFieldIdEnum { - KEY((short)1, "key"); - - private static final Map byName = new HashMap(); - - static { - for (_Fields field : EnumSet.allOf(_Fields.class)) { - byName.put(field.getFieldName(), field); - } - } - - /** - * Find the _Fields constant that matches fieldId, or null if its not found. - */ - public static _Fields findByThriftId(int fieldId) { - switch(fieldId) { - case 1: // KEY - return KEY; - default: - return null; - } - } - - /** - * Find the _Fields constant that matches fieldId, throwing an exception - * if it is not found. - */ - public static _Fields findByThriftIdOrThrow(int fieldId) { - _Fields fields = findByThriftId(fieldId); - if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); - return fields; - } - - /** - * Find the _Fields constant that matches name, or null if its not found. - */ - public static _Fields findByName(String name) { - return byName.get(name); - } - - private final short _thriftId; - private final String _fieldName; - - _Fields(short thriftId, String fieldName) { - _thriftId = thriftId; - _fieldName = fieldName; - } - - public short getThriftFieldId() { - return _thriftId; + public static class get_next_write_id extends org.apache.thrift.AsyncProcessFunction { + public get_next_write_id() { + super("get_next_write_id"); } - public String getFieldName() { - return _fieldName; + public get_next_write_id_args getEmptyArgsInstance() { + return new get_next_write_id_args(); } - } - - // isset id assignments - public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; - static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); - } - - public getMetaConf_args() { - } - - public getMetaConf_args( - String key) - { - this(); - this.key = key; - } - /** - * Performs a deep copy on other. 
- */ - public getMetaConf_args(getMetaConf_args other) { - if (other.isSetKey()) { - this.key = other.key; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetNextWriteIdResult o) { + get_next_write_id_result result = new get_next_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_next_write_id_result result = new get_next_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - } - - public getMetaConf_args deepCopy() { - return new getMetaConf_args(this); - } - - @Override - public void clear() { - this.key = null; - } - - public String getKey() { - return this.key; - } - - public void setKey(String key) { - this.key = key; - } - - public void unsetKey() { - this.key = null; - } - - /** Returns true if field key is set (has been assigned a value) and false otherwise */ - public boolean isSetKey() { - return this.key != null; - } - public void setKeyIsSet(boolean value) { - if (!value) { - this.key = null; + protected boolean isOneway() { + return false; } - } - - public void setFieldValue(_Fields field, Object value) { - switch (field) { - case KEY: - if (value == null) { - unsetKey(); - } else { - setKey((String)value); - } - break; + public void start(I iface, get_next_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_next_write_id(args.req,resultHandler); } } - public Object getFieldValue(_Fields field) { - switch (field) { - case KEY: - return getKey(); - + public static class finalize_write_id extends org.apache.thrift.AsyncProcessFunction { + public finalize_write_id() { + super("finalize_write_id"); } - throw new IllegalStateException(); - } - /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ - public boolean isSet(_Fields field) { - if (field == null) { - throw new IllegalArgumentException(); + public finalize_write_id_args getEmptyArgsInstance() { + return new finalize_write_id_args(); } - switch (field) { - case KEY: - return isSetKey(); + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(FinalizeWriteIdResult o) { + finalize_write_id_result result = new finalize_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + 
org.apache.thrift.TBase msg; + finalize_write_id_result result = new finalize_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; } - throw new IllegalStateException(); - } - - @Override - public boolean equals(Object that) { - if (that == null) - return false; - if (that instanceof getMetaConf_args) - return this.equals((getMetaConf_args)that); - return false; - } - public boolean equals(getMetaConf_args that) { - if (that == null) + protected boolean isOneway() { return false; - - boolean this_present_key = true && this.isSetKey(); - boolean that_present_key = true && that.isSetKey(); - if (this_present_key || that_present_key) { - if (!(this_present_key && that_present_key)) - return false; - if (!this.key.equals(that.key)) - return false; - } - - return true; - } - - @Override - public int hashCode() { - List list = new ArrayList(); - - boolean present_key = true && (isSetKey()); - list.add(present_key); - if (present_key) - list.add(key); - - return list.hashCode(); - } - - @Override - public int compareTo(getMetaConf_args other) { - if (!getClass().equals(other.getClass())) { - return getClass().getName().compareTo(other.getClass().getName()); - } - - int lastComparison = 0; - - lastComparison = Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetKey()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); - if (lastComparison != 0) { - return lastComparison; - } - } - return 0; - } - - public _Fields fieldForId(int fieldId) { - return _Fields.findByThriftId(fieldId); - } - - public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { - schemes.get(iprot.getScheme()).getScheme().read(iprot, this); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { - schemes.get(oprot.getScheme()).getScheme().write(oprot, this); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder("getMetaConf_args("); - boolean first = true; - - sb.append("key:"); - if (this.key == null) { - sb.append("null"); - } else { - sb.append(this.key); } - first = false; - sb.append(")"); - return sb.toString(); - } - - public void validate() throws org.apache.thrift.TException { - // check for required fields - // check for sub-struct validity - } - private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { - try { - write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + public void start(I iface, finalize_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.finalize_write_id(args.req,resultHandler); } } - private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { - try { - read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); - } catch (org.apache.thrift.TException te) { - throw new java.io.IOException(te); + 
public static class heartbeat_write_id extends org.apache.thrift.AsyncProcessFunction { + public heartbeat_write_id() { + super("heartbeat_write_id"); } - } - private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { - public getMetaConf_argsStandardScheme getScheme() { - return new getMetaConf_argsStandardScheme(); + public heartbeat_write_id_args getEmptyArgsInstance() { + return new heartbeat_write_id_args(); } - } - - private static class getMetaConf_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { - org.apache.thrift.protocol.TField schemeField; - iprot.readStructBegin(); - while (true) - { - schemeField = iprot.readFieldBegin(); - if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { - break; + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(HeartbeatWriteIdResult o) { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); } - switch (schemeField.id) { - case 1: // KEY - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - default: - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + heartbeat_write_id_result result = new heartbeat_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); } - iprot.readFieldEnd(); - } - iprot.readStructEnd(); - struct.validate(); - } - - public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { - struct.validate(); - - oprot.writeStructBegin(STRUCT_DESC); - if (struct.key != null) { - oprot.writeFieldBegin(KEY_FIELD_DESC); - oprot.writeString(struct.key); - oprot.writeFieldEnd(); - } - oprot.writeFieldStop(); - oprot.writeStructEnd(); - } - - } - - private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { - public getMetaConf_argsTupleScheme getScheme() { - return new getMetaConf_argsTupleScheme(); + }; } - } - - private static class getMetaConf_argsTupleScheme extends TupleScheme { - @Override - public void write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetKey()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetKey()) { - oprot.writeString(struct.key); - } + protected boolean isOneway() { + return false; } - @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { - TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.key = iprot.readString(); - struct.setKeyIsSet(true); - } + public void start(I iface, heartbeat_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.heartbeat_write_id(args.req,resultHandler); } } } - public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_args"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField KEY_FIELD_DESC = new org.apache.thrift.protocol.TField("key", org.apache.thrift.protocol.TType.STRING, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new getMetaConf_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_argsTupleSchemeFactory()); } - private String success; // required - private MetaException o1; // required + private String key; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"), - O1((short)1, "o1"); + KEY((short)1, "key"); private static final Map byName = new HashMap(); @@ -23840,10 +23875,371 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; - case 1: // O1 - return O1; + case 1: // KEY + return KEY; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.KEY, new org.apache.thrift.meta_data.FieldMetaData("key", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getMetaConf_args.class, metaDataMap); + } + + public getMetaConf_args() { + } + + public getMetaConf_args( + String key) + { + this(); + this.key = key; + } + + /** + * Performs a deep copy on other. + */ + public getMetaConf_args(getMetaConf_args other) { + if (other.isSetKey()) { + this.key = other.key; + } + } + + public getMetaConf_args deepCopy() { + return new getMetaConf_args(this); + } + + @Override + public void clear() { + this.key = null; + } + + public String getKey() { + return this.key; + } + + public void setKey(String key) { + this.key = key; + } + + public void unsetKey() { + this.key = null; + } + + /** Returns true if field key is set (has been assigned a value) and false otherwise */ + public boolean isSetKey() { + return this.key != null; + } + + public void setKeyIsSet(boolean value) { + if (!value) { + this.key = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case KEY: + if (value == null) { + unsetKey(); + } else { + setKey((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case KEY: + return getKey(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case KEY: + return isSetKey(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getMetaConf_args) + return this.equals((getMetaConf_args)that); + return false; + } + + public boolean equals(getMetaConf_args that) { + if (that == null) + return false; + + boolean this_present_key = true && this.isSetKey(); + boolean that_present_key = true && that.isSetKey(); + if (this_present_key || that_present_key) { + if (!(this_present_key && that_present_key)) + return false; + if (!this.key.equals(that.key)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_key = true && (isSetKey()); + list.add(present_key); + if (present_key) + list.add(key); + + return list.hashCode(); + } + + @Override + public int compareTo(getMetaConf_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = 
Boolean.valueOf(isSetKey()).compareTo(other.isSetKey()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key, other.key); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getMetaConf_args("); + boolean first = true; + + sb.append("key:"); + if (this.key == null) { + sb.append("null"); + } else { + sb.append(this.key); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getMetaConf_argsStandardSchemeFactory implements SchemeFactory { + public getMetaConf_argsStandardScheme getScheme() { + return new getMetaConf_argsStandardScheme(); + } + } + + private static class getMetaConf_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getMetaConf_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // KEY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getMetaConf_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.key != null) { + oprot.writeFieldBegin(KEY_FIELD_DESC); + oprot.writeString(struct.key); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getMetaConf_argsTupleSchemeFactory implements SchemeFactory { + public getMetaConf_argsTupleScheme getScheme() { + return new getMetaConf_argsTupleScheme(); + } + } + + private static class getMetaConf_argsTupleScheme extends TupleScheme { + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetKey()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetKey()) { + oprot.writeString(struct.key); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getMetaConf_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.key = iprot.readString(); + struct.setKeyIsSet(true); + } + } + } + + } + + public static class getMetaConf_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getMetaConf_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getMetaConf_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getMetaConf_resultTupleSchemeFactory()); + } + + private String success; // required + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; default: return null; } @@ -167773,7 +168169,2171 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("compact_args("); + StringBuilder sb = new StringBuilder("compact_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class compact_argsStandardSchemeFactory implements SchemeFactory { + public compact_argsStandardScheme getScheme() { + return new compact_argsStandardScheme(); + } + } + + private static class compact_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class compact_argsTupleSchemeFactory implements SchemeFactory { + public compact_argsTupleScheme getScheme() { + return new compact_argsTupleScheme(); + } + } + + private static class compact_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + 
struct.setRqstIsSet(true);
+        }
+      }
+    }
+
+  }
+
+  public static class compact_result implements org.apache.thrift.TBase<compact_result, compact_result._Fields>, java.io.Serializable, Cloneable, Comparable<compact_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result");
+
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory());
+    }
+
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap);
+    }
+
+    public compact_result() {
+    }
+
+    /**
+     * Performs a deep copy on other.
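+     * <p>Illustrative sketch (assumes libthrift's TMemoryBuffer and
+     * TBinaryProtocol; not part of this generated class): a void method's
+     * result struct has no fields, so a round trip writes and reads only the
+     * empty struct framing.
+     * <pre>
+     * org.apache.thrift.transport.TMemoryBuffer buf = new org.apache.thrift.transport.TMemoryBuffer(32);
+     * new compact_result().write(new org.apache.thrift.protocol.TBinaryProtocol(buf));
+     * compact_result echo = new compact_result();
+     * echo.read(new org.apache.thrift.protocol.TBinaryProtocol(buf));
+     * </pre>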
+ */ + public compact_result(compact_result other) { + } + + public compact_result deepCopy() { + return new compact_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof compact_result) + return this.equals((compact_result)that); + return false; + } + + public boolean equals(compact_result that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(compact_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("compact_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class compact_resultStandardSchemeFactory implements SchemeFactory { + public compact_resultStandardScheme getScheme() { + return new compact_resultStandardScheme(); + } + } + + private static class compact_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result 
struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class compact_resultTupleSchemeFactory implements SchemeFactory { + public compact_resultTupleScheme getScheme() { + return new compact_resultTupleScheme(); + } + } + + private static class compact_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class show_compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory()); + } + + private ShowCompactRequest rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
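+       * <p>Note on the wire format (restating the generated *TupleScheme code in
+       * this file, not new behavior): tuple encoding front-loads a presence
+       * BitSet, one bit per nullable field, so a single-field struct like this
+       * one costs one header bit plus the payload when rqst is set.
+       * <pre>
+       * BitSet optionals = new BitSet();
+       * if (struct.isSetRqst()) {
+       *   optionals.set(0);
+       * }
+       * oprot.writeBitSet(optionals, 1); // header: which fields follow
+       * if (struct.isSetRqst()) {
+       *   struct.rqst.write(oprot);      // payload, written only when present
+       * }
+       * </pre>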
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap); + } + + public show_compact_args() { + } + + public show_compact_args( + ShowCompactRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. + */ + public show_compact_args(show_compact_args other) { + if (other.isSetRqst()) { + this.rqst = new ShowCompactRequest(other.rqst); + } + } + + public show_compact_args deepCopy() { + return new show_compact_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public ShowCompactRequest getRqst() { + return this.rqst; + } + + public void setRqst(ShowCompactRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((ShowCompactRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_compact_args) + return this.equals((show_compact_args)that); + return false; + } + + public boolean equals(show_compact_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(show_compact_args other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_compact_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_compact_argsStandardSchemeFactory implements SchemeFactory { + public show_compact_argsStandardScheme getScheme() { + return new show_compact_argsStandardScheme(); + } + } + + private static class show_compact_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new ShowCompactRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class 
show_compact_argsTupleSchemeFactory implements SchemeFactory { + public show_compact_argsTupleScheme getScheme() { + return new show_compact_argsTupleScheme(); + } + } + + private static class show_compact_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new ShowCompactRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class show_compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory()); + } + + private ShowCompactResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
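+       * <p>(Field id 0 is Thrift's conventional slot for a method's return
+       * value, which is why SUCCESS above is (short)0 while declared exceptions
+       * in other *_result structs start at (short)1.)
+       * <pre>
+       * show_compact_result._Fields f = show_compact_result._Fields.findByThriftId(0); // SUCCESS
+       * </pre>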
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap); + } + + public show_compact_result() { + } + + public show_compact_result( + ShowCompactResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. + */ + public show_compact_result(show_compact_result other) { + if (other.isSetSuccess()) { + this.success = new ShowCompactResponse(other.success); + } + } + + public show_compact_result deepCopy() { + return new show_compact_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public ShowCompactResponse getSuccess() { + return this.success; + } + + public void setSuccess(ShowCompactResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((ShowCompactResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_compact_result) + return this.equals((show_compact_result)that); + return false; + } + + public boolean equals(show_compact_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if 
(present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(show_compact_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_compact_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_compact_resultStandardSchemeFactory implements SchemeFactory { + public show_compact_resultStandardScheme getScheme() { + return new show_compact_resultStandardScheme(); + } + } + + private static class show_compact_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new ShowCompactResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class show_compact_resultTupleSchemeFactory implements SchemeFactory { + public show_compact_resultTupleScheme getScheme() { + return new show_compact_resultTupleScheme(); + } + } + + private static class show_compact_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new ShowCompactResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class add_dynamic_partitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory()); + } + + private AddDynamicPartitions rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
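+       * <p>Illustrative sketch (uses only the constructors generated below): the
+       * args wrapper deep-copies its request, so copies never alias the original
+       * AddDynamicPartitions object.
+       * <pre>
+       * AddDynamicPartitions rqst = new AddDynamicPartitions();
+       * add_dynamic_partitions_args args = new add_dynamic_partitions_args(rqst);
+       * add_dynamic_partitions_args copy = args.deepCopy();
+       * // copy.getRqst() != rqst: the copy constructor ran new AddDynamicPartitions(rqst)
+       * </pre>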
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap); + } + + public add_dynamic_partitions_args() { + } + + public add_dynamic_partitions_args( + AddDynamicPartitions rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. + */ + public add_dynamic_partitions_args(add_dynamic_partitions_args other) { + if (other.isSetRqst()) { + this.rqst = new AddDynamicPartitions(other.rqst); + } + } + + public add_dynamic_partitions_args deepCopy() { + return new add_dynamic_partitions_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public AddDynamicPartitions getRqst() { + return this.rqst; + } + + public void setRqst(AddDynamicPartitions rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((AddDynamicPartitions)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_dynamic_partitions_args) + return this.equals((add_dynamic_partitions_args)that); + return false; + } + + public boolean equals(add_dynamic_partitions_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return 
list.hashCode(); + } + + @Override + public int compareTo(add_dynamic_partitions_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_dynamic_partitions_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsStandardScheme getScheme() { + return new add_dynamic_partitions_argsStandardScheme(); + } + } + + private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + 
struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsTupleScheme getScheme() { + return new add_dynamic_partitions_argsTupleScheme(); + } + } + + private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class add_dynamic_partitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_resultTupleSchemeFactory()); + } + + private NoSuchTxnException o1; // required + private TxnAbortedException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
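+       * <p>Illustrative client-side sketch (iprot stands for any TProtocol
+       * positioned at this struct; a hypothetical snippet, not generated code):
+       * a void method's _result struct carries only its declared exceptions, so
+       * after a read at most one of o1/o2 is set.
+       * <pre>
+       * add_dynamic_partitions_result r = new add_dynamic_partitions_result();
+       * r.read(iprot);
+       * if (r.isSetO1()) throw r.getO1(); // NoSuchTxnException
+       * if (r.isSetO2()) throw r.getO2(); // TxnAbortedException
+       * </pre>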
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap); + } + + public add_dynamic_partitions_result() { + } + + public add_dynamic_partitions_result( + NoSuchTxnException o1, + TxnAbortedException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. + */ + public add_dynamic_partitions_result(add_dynamic_partitions_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchTxnException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new TxnAbortedException(other.o2); + } + } + + public add_dynamic_partitions_result deepCopy() { + return new add_dynamic_partitions_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public NoSuchTxnException getO1() { + return this.o1; + } + + public void setO1(NoSuchTxnException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public TxnAbortedException getO2() { + return this.o2; + } + + public void setO2(TxnAbortedException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchTxnException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((TxnAbortedException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return 
isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof add_dynamic_partitions_result) + return this.equals((add_dynamic_partitions_result)that); + return false; + } + + public boolean equals(add_dynamic_partitions_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(add_dynamic_partitions_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new 
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory {
+    public add_dynamic_partitions_resultStandardScheme getScheme() {
+      return new add_dynamic_partitions_resultStandardScheme();
+    }
+  }
+
+  private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme<add_dynamic_partitions_result> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) {
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // O1
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.o1 = new NoSuchTxnException();
+              struct.o1.read(iprot);
+              struct.setO1IsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // O2
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.o2 = new TxnAbortedException();
+              struct.o2.read(iprot);
+              struct.setO2IsSet(true);
+            } else {
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.o1 != null) {
+        oprot.writeFieldBegin(O1_FIELD_DESC);
+        struct.o1.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.o2 != null) {
+        oprot.writeFieldBegin(O2_FIELD_DESC);
+        struct.o2.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory {
+    public add_dynamic_partitions_resultTupleScheme getScheme() {
+      return new add_dynamic_partitions_resultTupleScheme();
+    }
+  }
+
+  private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme<add_dynamic_partitions_result> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetO1()) {
+        optionals.set(0);
+      }
+      if (struct.isSetO2()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetO1()) {
+        struct.o1.write(oprot);
+      }
+      if (struct.isSetO2()) {
+        struct.o2.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.o1 = new NoSuchTxnException();
+        struct.o1.read(iprot);
+        struct.setO1IsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.o2 = new TxnAbortedException();
+        struct.o2.read(iprot);
+        struct.setO2IsSet(true);
+      }
+    }
+  }
+
+  }
+
+  public static class get_next_notification_args implements org.apache.thrift.TBase<get_next_notification_args, get_next_notification_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_notification_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args");
+
+    private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory());
+    }
+
+    private NotificationEventRequest rqst; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      RQST((short)1, "rqst");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // RQST
+            return RQST;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap);
+    }
+
+    public get_next_notification_args() {
+    }
+
+    public get_next_notification_args(
+      NotificationEventRequest rqst)
+    {
+      this();
+      this.rqst = rqst;
+    }
+
+    /**
+     * Performs a deep copy on other.
+     */
+    public get_next_notification_args(get_next_notification_args other) {
+      if (other.isSetRqst()) {
+        this.rqst = new NotificationEventRequest(other.rqst);
+      }
+    }
+
+    public get_next_notification_args deepCopy() {
+      return new get_next_notification_args(this);
+    }
+
+    @Override
+    public void clear() {
+      this.rqst = null;
+    }
+
+    public NotificationEventRequest getRqst() {
+      return this.rqst;
+    }
+
+    public void setRqst(NotificationEventRequest rqst) {
+      this.rqst = rqst;
+    }
+
+    public void unsetRqst() {
+      this.rqst = null;
+    }
+
+    /** Returns true if field rqst is set (has been assigned a value) and false otherwise */
+    public boolean isSetRqst() {
+      return this.rqst != null;
+    }
+
+    public void setRqstIsSet(boolean value) {
+      if (!value) {
+        this.rqst = null;
+      }
+    }
+
+    public void setFieldValue(_Fields field, Object value) {
+      switch (field) {
+      case RQST:
+        if (value == null) {
+          unsetRqst();
+        } else {
+          setRqst((NotificationEventRequest)value);
+        }
+        break;
+
+      }
+    }
+
+    public Object getFieldValue(_Fields field) {
+      switch (field) {
+      case RQST:
+        return getRqst();
+
+      }
+      throw new IllegalStateException();
+    }
+
+    /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+    public boolean isSet(_Fields field) {
+      if (field == null) {
+        throw new IllegalArgumentException();
+      }
+
+      switch (field) {
+      case RQST:
+        return isSetRqst();
+      }
+      throw new IllegalStateException();
+    }
+
+    @Override
+    public boolean equals(Object that) {
+      if (that == null)
+        return false;
+      if (that instanceof get_next_notification_args)
+        return this.equals((get_next_notification_args)that);
+      return false;
+    }
+
+    public boolean equals(get_next_notification_args that) {
+      if (that == null)
+        return false;
+
+      boolean this_present_rqst = true && this.isSetRqst();
+      boolean that_present_rqst = true && that.isSetRqst();
+      if (this_present_rqst || that_present_rqst) {
+        if (!(this_present_rqst && that_present_rqst))
+          return false;
+        if (!this.rqst.equals(that.rqst))
+          return false;
+      }
+
+      return true;
+    }
+
+    @Override
+    public int hashCode() {
+      List<Object> list = new ArrayList<Object>();
+
+      boolean present_rqst = true && (isSetRqst());
+      list.add(present_rqst);
+      if (present_rqst)
+        list.add(rqst);
+
+      return list.hashCode();
+    }
+
+    @Override
+    public int compareTo(get_next_notification_args other) {
+      if (!getClass().equals(other.getClass())) {
+        return getClass().getName().compareTo(other.getClass().getName());
+      }
+
+      int lastComparison = 0;
+
+      lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetRqst()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
+      return 0;
+    }
+
+    public _Fields fieldForId(int fieldId) {
+      return _Fields.findByThriftId(fieldId);
+    }
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+      schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+      schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+    }
+
+    @Override
+    public String toString() {
+      StringBuilder sb = new StringBuilder("get_next_notification_args(");
       boolean first = true;
 
       sb.append("rqst:");
@@ -167811,15 +170371,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class compact_argsStandardSchemeFactory implements SchemeFactory {
-      public compact_argsStandardScheme getScheme() {
-        return new compact_argsStandardScheme();
+    private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory {
+      public get_next_notification_argsStandardScheme getScheme() {
+        return new get_next_notification_argsStandardScheme();
       }
     }
 
-    private static class compact_argsStandardScheme extends StandardScheme<compact_args> {
+    private static class get_next_notification_argsStandardScheme extends StandardScheme<get_next_notification_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -167831,7 +170391,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct
           switch (schemeField.id) {
             case 1: // RQST
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.rqst = new CompactionRequest();
+                struct.rqst = new NotificationEventRequest();
                 struct.rqst.read(iprot);
                 struct.setRqstIsSet(true);
               } else {
@@ -167847,7 +170407,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -167862,16 +170422,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struc
 
     }
 
-    private static class compact_argsTupleSchemeFactory implements SchemeFactory {
-      public compact_argsTupleScheme getScheme() {
-        return new compact_argsTupleScheme();
+    private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory {
+      public get_next_notification_argsTupleScheme getScheme() {
+        return new get_next_notification_argsTupleScheme();
       }
     }
 
-    private static class compact_argsTupleScheme extends TupleScheme<compact_args> {
+    private static class get_next_notification_argsTupleScheme extends TupleScheme<get_next_notification_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetRqst()) {
@@ -167884,11 +170444,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.rqst = new CompactionRequest();
+          struct.rqst = new NotificationEventRequest();
           struct.rqst.read(iprot);
           struct.setRqstIsSet(true);
         }
@@ -167897,20 +170457,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct)
 
   }
 
-  public static class compact_result implements org.apache.thrift.TBase<compact_result, compact_result._Fields>, java.io.Serializable, Cloneable, Comparable<compact_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result");
+  public static class get_next_notification_result implements org.apache.thrift.TBase<get_next_notification_result, get_next_notification_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_notification_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result");
 
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory());
     }
 
+    private NotificationEventResponse success; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
+      SUCCESS((short)0, "success");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -167925,6 +170487,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
+          case 0: // SUCCESS
+            return SUCCESS;
           default:
             return null;
         }
@@ -167963,37 +170527,86 @@ public String getFieldName() {
         return _fieldName;
       }
     }
+
+    // isset id assignments
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap);
     }
 
-    public compact_result() {
+    public get_next_notification_result() {
+    }
+
+    public get_next_notification_result(
+      NotificationEventResponse success)
+    {
+      this();
+      this.success = success;
     }
 
     /**
     * Performs a deep copy on other.
     */
-    public compact_result(compact_result other) {
+    public get_next_notification_result(get_next_notification_result other) {
+      if (other.isSetSuccess()) {
+        this.success = new NotificationEventResponse(other.success);
+      }
     }
 
-    public compact_result deepCopy() {
-      return new compact_result(this);
+    public get_next_notification_result deepCopy() {
+      return new get_next_notification_result(this);
     }
 
     @Override
     public void clear() {
+      this.success = null;
+    }
+
+    public NotificationEventResponse getSuccess() {
+      return this.success;
+    }
+
+    public void setSuccess(NotificationEventResponse success) {
+      this.success = success;
+    }
+
+    public void unsetSuccess() {
+      this.success = null;
+    }
+
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
+    }
+
+    public void setSuccessIsSet(boolean value) {
+      if (!value) {
+        this.success = null;
+      }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
+      case SUCCESS:
+        if (value == null) {
+          unsetSuccess();
+        } else {
+          setSuccess((NotificationEventResponse)value);
+        }
+        break;
+
       }
     }
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
+      case SUCCESS:
+        return getSuccess();
+
       }
       throw new IllegalStateException();
     }
@@ -168005,6 +170618,8 @@ public boolean isSet(_Fields field) {
       }
 
       switch (field) {
+      case SUCCESS:
+        return isSetSuccess();
       }
       throw new IllegalStateException();
     }
@@ -168013,15 +170628,24 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof compact_result)
-        return this.equals((compact_result)that);
+      if (that instanceof get_next_notification_result)
+        return this.equals((get_next_notification_result)that);
       return false;
     }
 
-    public boolean equals(compact_result that) {
+    public boolean equals(get_next_notification_result that) {
       if (that == null)
         return false;
 
+      boolean this_present_success = true && this.isSetSuccess();
+      boolean that_present_success = true && that.isSetSuccess();
+      if (this_present_success || that_present_success) {
+        if (!(this_present_success && that_present_success))
+          return false;
+        if (!this.success.equals(that.success))
+          return false;
+      }
+
       return true;
     }
 
@@ -168029,17 +170653,32 @@ public boolean equals(compact_result that) {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
+      boolean present_success = true && (isSetSuccess());
+      list.add(present_success);
+      if (present_success)
+        list.add(success);
+
       return list.hashCode();
     }
 
     @Override
-    public int compareTo(compact_result other) {
+    public int compareTo(get_next_notification_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
 
+      lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+      if (isSetSuccess()) {
+        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+        if (lastComparison != 0) {
+          return lastComparison;
+        }
+      }
       return 0;
     }
 
@@ -168057,9 +170696,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("compact_result(");
+      StringBuilder sb = new StringBuilder("get_next_notification_result(");
       boolean first = true;
 
+      sb.append("success:");
+      if (this.success == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.success);
+      }
+      first = false;
       sb.append(")");
       return sb.toString();
     }
@@ -168067,6 +170713,9 @@ public String toString() {
     public void validate() throws org.apache.thrift.TException {
       // check for required fields
       // check for sub-struct validity
+      if (success != null) {
+        success.validate();
+      }
     }
 
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
@@ -168085,15 +170734,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class compact_resultStandardSchemeFactory implements SchemeFactory {
-      public compact_resultStandardScheme getScheme() {
-        return new compact_resultStandardScheme();
+    private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory {
+      public get_next_notification_resultStandardScheme getScheme() {
+        return new get_next_notification_resultStandardScheme();
       }
     }
 
-    private static class compact_resultStandardScheme extends StandardScheme<compact_result> {
+    private static class get_next_notification_resultStandardScheme extends StandardScheme<get_next_notification_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -168103,6 +170752,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru
             break;
           }
           switch (schemeField.id) {
+            case 0: // SUCCESS
+              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+                struct.success = new NotificationEventResponse();
+                struct.success.read(iprot);
+                struct.setSuccessIsSet(true);
+              } else {
+                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+              }
+              break;
             default:
              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -168112,53 +170770,70 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
+        if (struct.success != null) {
+          oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+          struct.success.write(oprot);
+          oprot.writeFieldEnd();
+        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
 
     }
 
-    private static class compact_resultTupleSchemeFactory implements SchemeFactory {
-      public compact_resultTupleScheme getScheme() {
-        return new compact_resultTupleScheme();
+    private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory {
+      public get_next_notification_resultTupleScheme getScheme() {
+        return new get_next_notification_resultTupleScheme();
      }
    }
 
-    private static class compact_resultTupleScheme extends TupleScheme<compact_result> {
+    private static class get_next_notification_resultTupleScheme extends TupleScheme<get_next_notification_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
+        BitSet optionals = new BitSet();
+        if (struct.isSetSuccess()) {
+          optionals.set(0);
+        }
+        oprot.writeBitSet(optionals, 1);
+        if (struct.isSetSuccess()) {
+          struct.success.write(oprot);
+        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
+        BitSet incoming = iprot.readBitSet(1);
+        if (incoming.get(0)) {
+          struct.success = new NotificationEventResponse();
+          struct.success.read(iprot);
+          struct.setSuccessIsSet(true);
+        }
       }
     }
 
   }
 
-  public static class show_compact_args implements org.apache.thrift.TBase<show_compact_args, show_compact_args._Fields>, java.io.Serializable, Cloneable, Comparable<show_compact_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args");
+  public static class get_current_notificationEventId_args implements org.apache.thrift.TBase<get_current_notificationEventId_args, get_current_notificationEventId_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_current_notificationEventId_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args");
 
-    private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory());
    }
 
-    private ShowCompactRequest rqst; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
 */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      RQST((short)1, "rqst");
+;
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -168173,8 +170848,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 1: // RQST
-            return RQST;
           default:
             return null;
         }
@@ -168213,86 +170886,37 @@ public String getFieldName() {
         return _fieldName;
       }
     }
-
-    // isset id assignments
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap);
-    }
-
-    public show_compact_args() {
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap);
     }
 
-    public show_compact_args(
-      ShowCompactRequest rqst)
-    {
-      this();
-      this.rqst = rqst;
+    public get_current_notificationEventId_args() {
     }
 
     /**
     * Performs a deep copy on other.
     */
-    public show_compact_args(show_compact_args other) {
-      if (other.isSetRqst()) {
-        this.rqst = new ShowCompactRequest(other.rqst);
-      }
+    public get_current_notificationEventId_args(get_current_notificationEventId_args other) {
     }
 
-    public show_compact_args deepCopy() {
-      return new show_compact_args(this);
+    public get_current_notificationEventId_args deepCopy() {
+      return new get_current_notificationEventId_args(this);
     }
 
     @Override
     public void clear() {
-      this.rqst = null;
-    }
-
-    public ShowCompactRequest getRqst() {
-      return this.rqst;
-    }
-
-    public void setRqst(ShowCompactRequest rqst) {
-      this.rqst = rqst;
-    }
-
-    public void unsetRqst() {
-      this.rqst = null;
-    }
-
-    /** Returns true if field rqst is set (has been assigned a value) and false otherwise */
-    public boolean isSetRqst() {
-      return this.rqst != null;
-    }
-
-    public void setRqstIsSet(boolean value) {
-      if (!value) {
-        this.rqst = null;
-      }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
-      case RQST:
-        if (value == null) {
-          unsetRqst();
-        } else {
-          setRqst((ShowCompactRequest)value);
-        }
-        break;
-
       }
     }
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
-      case RQST:
-        return getRqst();
-
       }
       throw new IllegalStateException();
     }
@@ -168304,8 +170928,6 @@ public boolean isSet(_Fields field) {
       }
 
       switch (field) {
-      case RQST:
-        return isSetRqst();
       }
       throw new IllegalStateException();
     }
@@ -168314,24 +170936,15 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof show_compact_args)
-        return this.equals((show_compact_args)that);
+      if (that instanceof get_current_notificationEventId_args)
+        return this.equals((get_current_notificationEventId_args)that);
       return false;
     }
 
-    public boolean equals(show_compact_args that) {
+    public boolean equals(get_current_notificationEventId_args that) {
       if (that == null)
         return false;
 
-      boolean this_present_rqst = true && this.isSetRqst();
-      boolean that_present_rqst = true && that.isSetRqst();
-      if (this_present_rqst || that_present_rqst) {
-        if (!(this_present_rqst && that_present_rqst))
-          return false;
-        if (!this.rqst.equals(that.rqst))
-          return false;
-      }
-
       return true;
     }
 
@@ -168339,32 +170952,17 @@ public boolean equals(show_compact_args that) {
     public int hashCode() {
       List<Object> list = new ArrayList<Object>();
 
-      boolean present_rqst = true && (isSetRqst());
-      list.add(present_rqst);
-      if (present_rqst)
-        list.add(rqst);
-
       return list.hashCode();
     }
 
     @Override
-    public int compareTo(show_compact_args other) {
+    public int compareTo(get_current_notificationEventId_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
 
       int lastComparison = 0;
 
-      lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst());
-      if (lastComparison != 0) {
-        return lastComparison;
-      }
-      if (isSetRqst()) {
-        lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst);
-        if (lastComparison != 0) {
-          return lastComparison;
-        }
-      }
       return 0;
     }
 
@@ -168382,16 +170980,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("show_compact_args(");
+      StringBuilder sb = new StringBuilder("get_current_notificationEventId_args(");
       boolean first = true;
 
-      sb.append("rqst:");
-      if (this.rqst == null) {
-        sb.append("null");
-      } else {
-        sb.append(this.rqst);
-      }
-      first = false;
       sb.append(")");
       return sb.toString();
     }
@@ -168399,9 +170990,6 @@ public String toString() {
     public void validate() throws org.apache.thrift.TException {
       // check for required fields
       // check for sub-struct validity
-      if (rqst != null) {
-        rqst.validate();
-      }
     }
 
     private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
@@ -168420,15 +171008,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class show_compact_argsStandardSchemeFactory implements SchemeFactory {
-      public show_compact_argsStandardScheme getScheme() {
-        return new show_compact_argsStandardScheme();
+    private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory {
+      public get_current_notificationEventId_argsStandardScheme getScheme() {
+        return new get_current_notificationEventId_argsStandardScheme();
      }
    }
 
-    private static class show_compact_argsStandardScheme extends StandardScheme<show_compact_args> {
+    private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme<get_current_notificationEventId_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -168438,15 +171026,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s
             break;
           }
           switch (schemeField.id) {
-            case 1: // RQST
-              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.rqst = new ShowCompactRequest();
-                struct.rqst.read(iprot);
-                struct.setRqstIsSet(true);
-              } else {
-                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
-              }
-              break;
             default:
               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
           }
@@ -168456,68 +171035,49 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
-        if (struct.rqst != null) {
-          oprot.writeFieldBegin(RQST_FIELD_DESC);
-          struct.rqst.write(oprot);
-          oprot.writeFieldEnd();
-        }
         oprot.writeFieldStop();
         oprot.writeStructEnd();
       }
 
     }
 
-    private static class show_compact_argsTupleSchemeFactory implements SchemeFactory {
-      public show_compact_argsTupleScheme getScheme() {
-        return new show_compact_argsTupleScheme();
+    private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory {
+      public get_current_notificationEventId_argsTupleScheme getScheme() {
+        return new get_current_notificationEventId_argsTupleScheme();
      }
    }
 
-    private static class show_compact_argsTupleScheme extends TupleScheme<show_compact_args> {
+    private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme<get_current_notificationEventId_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
-        BitSet optionals = new BitSet();
-        if (struct.isSetRqst()) {
-          optionals.set(0);
-        }
-        oprot.writeBitSet(optionals, 1);
-        if (struct.isSetRqst()) {
-          struct.rqst.write(oprot);
-        }
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
-        BitSet incoming = iprot.readBitSet(1);
-        if (incoming.get(0)) {
-          struct.rqst = new ShowCompactRequest();
-          struct.rqst.read(iprot);
-          struct.setRqstIsSet(true);
-        }
       }
     }
 
   }
 
-  public static class show_compact_result implements org.apache.thrift.TBase<show_compact_result, show_compact_result._Fields>, java.io.Serializable, Cloneable, Comparable<show_compact_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result");
+  public static class get_current_notificationEventId_result implements org.apache.thrift.TBase<get_current_notificationEventId_result, get_current_notificationEventId_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_current_notificationEventId_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result");
 
     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory());
    }
 
-    private ShowCompactResponse success; // required
+    private CurrentNotificationEventId success; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
 */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -168582,16 +171142,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap);
     }
 
-    public show_compact_result() {
+    public get_current_notificationEventId_result() {
     }
 
-    public show_compact_result(
-      ShowCompactResponse success)
+    public get_current_notificationEventId_result(
+      CurrentNotificationEventId success)
     {
       this();
       this.success = success;
@@ -168600,14 +171160,14 @@ public show_compact_result(
     /**
     * Performs a deep copy on other.
     */
-    public show_compact_result(show_compact_result other) {
+    public get_current_notificationEventId_result(get_current_notificationEventId_result other) {
       if (other.isSetSuccess()) {
-        this.success = new ShowCompactResponse(other.success);
+        this.success = new CurrentNotificationEventId(other.success);
       }
     }
 
-    public show_compact_result deepCopy() {
-      return new show_compact_result(this);
+    public get_current_notificationEventId_result deepCopy() {
+      return new get_current_notificationEventId_result(this);
     }
 
     @Override
@@ -168615,11 +171175,11 @@ public void clear() {
       this.success = null;
     }
 
-    public ShowCompactResponse getSuccess() {
+    public CurrentNotificationEventId getSuccess() {
       return this.success;
     }
 
-    public void setSuccess(ShowCompactResponse success) {
+    public void setSuccess(CurrentNotificationEventId success) {
       this.success = success;
     }
 
@@ -168644,7 +171204,7 @@ public void setFieldValue(_Fields field, Object value) {
           if (value == null) {
             unsetSuccess();
           } else {
-            setSuccess((ShowCompactResponse)value);
+            setSuccess((CurrentNotificationEventId)value);
           }
           break;
 
@@ -168677,12 +171237,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof show_compact_result)
-        return this.equals((show_compact_result)that);
+      if (that instanceof get_current_notificationEventId_result)
+        return this.equals((get_current_notificationEventId_result)that);
       return false;
     }
 
-    public boolean equals(show_compact_result that) {
+    public boolean equals(get_current_notificationEventId_result that) {
       if (that == null)
         return false;
 
@@ -168711,7 +171271,7 @@ public int hashCode() {
     }
 
     @Override
-    public int compareTo(show_compact_result other) {
+    public int compareTo(get_current_notificationEventId_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -168745,7 +171305,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("show_compact_result(");
+      StringBuilder sb = new StringBuilder("get_current_notificationEventId_result(");
       boolean first = true;
 
       sb.append("success:");
@@ -168783,15 +171343,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class show_compact_resultStandardSchemeFactory implements SchemeFactory {
-      public show_compact_resultStandardScheme getScheme() {
-        return new show_compact_resultStandardScheme();
+    private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory {
+      public get_current_notificationEventId_resultStandardScheme getScheme() {
+        return new get_current_notificationEventId_resultStandardScheme();
      }
    }
 
-    private static class show_compact_resultStandardScheme extends StandardScheme<show_compact_result> {
+    private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme<get_current_notificationEventId_result> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -168803,7 +171363,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result
           switch (schemeField.id) {
             case 0: // SUCCESS
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new ShowCompactResponse();
+                struct.success = new CurrentNotificationEventId();
                 struct.success.read(iprot);
                 struct.setSuccessIsSet(true);
               } else {
@@ -168819,7 +171379,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -168834,16 +171394,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_resul
 
     }
 
-    private static class show_compact_resultTupleSchemeFactory implements SchemeFactory {
-      public show_compact_resultTupleScheme getScheme() {
-        return new show_compact_resultTupleScheme();
+    private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory {
+      public get_current_notificationEventId_resultTupleScheme getScheme() {
+        return new get_current_notificationEventId_resultTupleScheme();
      }
    }
 
-    private static class show_compact_resultTupleScheme extends TupleScheme<show_compact_result> {
+    private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme<get_current_notificationEventId_result> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetSuccess()) {
@@ -168856,11 +171416,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.success = new ShowCompactResponse();
+          struct.success = new CurrentNotificationEventId();
           struct.success.read(iprot);
           struct.setSuccessIsSet(true);
         }
@@ -168869,18 +171429,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result
 
   }
 
-  public static class add_dynamic_partitions_args implements org.apache.thrift.TBase<add_dynamic_partitions_args, add_dynamic_partitions_args._Fields>, java.io.Serializable, Cloneable, Comparable<add_dynamic_partitions_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args");
+  public static class fire_listener_event_args implements org.apache.thrift.TBase<fire_listener_event_args, fire_listener_event_args._Fields>, java.io.Serializable, Cloneable, Comparable<fire_listener_event_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args");
 
     private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory());
    }
 
-    private AddDynamicPartitions rqst; // required
+    private FireEventRequest rqst; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -168945,16 +171505,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap);
     }
 
-    public add_dynamic_partitions_args() {
+    public fire_listener_event_args() {
     }
 
-    public add_dynamic_partitions_args(
-      AddDynamicPartitions rqst)
+    public fire_listener_event_args(
+      FireEventRequest rqst)
     {
       this();
       this.rqst = rqst;
@@ -168963,14 +171523,14 @@ public add_dynamic_partitions_args(
     /**
     * Performs a deep copy on other.
     */
-    public add_dynamic_partitions_args(add_dynamic_partitions_args other) {
+    public fire_listener_event_args(fire_listener_event_args other) {
       if (other.isSetRqst()) {
-        this.rqst = new AddDynamicPartitions(other.rqst);
+        this.rqst = new FireEventRequest(other.rqst);
       }
     }
 
-    public add_dynamic_partitions_args deepCopy() {
-      return new add_dynamic_partitions_args(this);
+    public fire_listener_event_args deepCopy() {
+      return new fire_listener_event_args(this);
     }
 
     @Override
@@ -168978,11 +171538,11 @@ public void clear() {
       this.rqst = null;
     }
 
-    public AddDynamicPartitions getRqst() {
+    public FireEventRequest getRqst() {
       return this.rqst;
     }
 
-    public void setRqst(AddDynamicPartitions rqst) {
+    public void setRqst(FireEventRequest rqst) {
       this.rqst = rqst;
     }
 
@@ -169007,7 +171567,7 @@ public void setFieldValue(_Fields field, Object value) {
           if (value == null) {
             unsetRqst();
           } else {
-            setRqst((AddDynamicPartitions)value);
+            setRqst((FireEventRequest)value);
           }
           break;
 
@@ -169040,12 +171600,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof add_dynamic_partitions_args)
-        return this.equals((add_dynamic_partitions_args)that);
+      if (that instanceof fire_listener_event_args)
+        return this.equals((fire_listener_event_args)that);
       return false;
     }
 
-    public boolean equals(add_dynamic_partitions_args that) {
+    public boolean equals(fire_listener_event_args that) {
       if (that == null)
         return false;
 
@@ -169074,7 +171634,7 @@ public int hashCode() {
     }
 
     @Override
-    public int compareTo(add_dynamic_partitions_args other) {
+    public int compareTo(fire_listener_event_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }
@@ -169108,7 +171668,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("add_dynamic_partitions_args(");
+      StringBuilder sb = new StringBuilder("fire_listener_event_args(");
       boolean first = true;
 
       sb.append("rqst:");
@@ -169146,15 +171706,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory {
-      public add_dynamic_partitions_argsStandardScheme getScheme() {
-        return new add_dynamic_partitions_argsStandardScheme();
+    private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory {
+      public fire_listener_event_argsStandardScheme getScheme() {
+        return new fire_listener_event_argsStandardScheme();
      }
    }
 
-    private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme<add_dynamic_partitions_args> {
+    private static class fire_listener_event_argsStandardScheme extends StandardScheme<fire_listener_event_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException {
         org.apache.thrift.protocol.TField schemeField;
         iprot.readStructBegin();
         while (true)
@@ -169166,7 +171726,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti
           switch (schemeField.id) {
             case 1: // RQST
               if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.rqst = new AddDynamicPartitions();
+                struct.rqst = new FireEventRequest();
                 struct.rqst.read(iprot);
                 struct.setRqstIsSet(true);
               } else {
@@ -169182,7 +171742,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti
         struct.validate();
       }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException {
         struct.validate();
 
         oprot.writeStructBegin(STRUCT_DESC);
@@ -169197,16 +171757,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partit
 
     }
 
-    private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory {
-      public add_dynamic_partitions_argsTupleScheme getScheme() {
-        return new add_dynamic_partitions_argsTupleScheme();
+    private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory {
+      public fire_listener_event_argsTupleScheme getScheme() {
+        return new fire_listener_event_argsTupleScheme();
      }
    }
 
-    private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme<add_dynamic_partitions_args> {
+    private static class fire_listener_event_argsTupleScheme extends TupleScheme<fire_listener_event_args> {
 
       @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException {
         TTupleProtocol oprot = (TTupleProtocol) prot;
         BitSet optionals = new BitSet();
         if (struct.isSetRqst()) {
@@ -169219,11 +171779,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partiti
       }
 
       @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException {
         TTupleProtocol iprot = (TTupleProtocol) prot;
         BitSet incoming = iprot.readBitSet(1);
         if (incoming.get(0)) {
-          struct.rqst = new AddDynamicPartitions();
+          struct.rqst = new FireEventRequest();
          struct.rqst.read(iprot);
          struct.setRqstIsSet(true);
        }
@@ -169232,25 +171792,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitio
 
   }
 
-  public static class add_dynamic_partitions_result implements org.apache.thrift.TBase<add_dynamic_partitions_result, add_dynamic_partitions_result._Fields>, java.io.Serializable, Cloneable, Comparable<add_dynamic_partitions_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result");
+  public static class fire_listener_event_result implements org.apache.thrift.TBase<fire_listener_event_result, fire_listener_event_result._Fields>, java.io.Serializable, Cloneable, Comparable<fire_listener_event_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result");
 
-    private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1);
-    private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
 
     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new add_dynamic_partitions_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new fire_listener_event_resultTupleSchemeFactory());
    }
 
-    private NoSuchTxnException o1; // required
-    private TxnAbortedException o2; // required
+    private FireEventResponse success; // required
 
     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
 */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-      O1((short)1, "o1"),
-      O2((short)2, "o2");
+      SUCCESS((short)0, "success");
 
       private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
 
@@ -169265,10 +171822,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
        */
       public static _Fields findByThriftId(int fieldId) {
         switch(fieldId) {
-          case 1: // O1
-            return O1;
-          case 2: // O2
-            return O2;
+          case 0: // SUCCESS
+            return SUCCESS;
           default:
             return null;
         }
@@ -169312,109 +171867,70 @@ public String getFieldName() {
     public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
-      tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
-      tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap);
     }
 
-    public add_dynamic_partitions_result() {
+    public fire_listener_event_result() {
     }
 
-    public add_dynamic_partitions_result(
-      NoSuchTxnException o1,
-      TxnAbortedException o2)
+    public fire_listener_event_result(
+      FireEventResponse success)
     {
       this();
-      this.o1 = o1;
-      this.o2 = o2;
+      this.success = success;
     }
 
     /**
     * Performs a deep copy on other.
     */
-    public add_dynamic_partitions_result(add_dynamic_partitions_result other) {
-      if (other.isSetO1()) {
-        this.o1 = new NoSuchTxnException(other.o1);
-      }
-      if (other.isSetO2()) {
-        this.o2 = new TxnAbortedException(other.o2);
+    public fire_listener_event_result(fire_listener_event_result other) {
+      if (other.isSetSuccess()) {
+        this.success = new FireEventResponse(other.success);
       }
     }
 
-    public add_dynamic_partitions_result deepCopy() {
-      return new add_dynamic_partitions_result(this);
+    public fire_listener_event_result deepCopy() {
+      return new fire_listener_event_result(this);
     }
 
     @Override
     public void clear() {
-      this.o1 = null;
-      this.o2 = null;
-    }
-
-    public NoSuchTxnException getO1() {
-      return this.o1;
-    }
-
-    public void setO1(NoSuchTxnException o1) {
-      this.o1 = o1;
-    }
-
-    public void unsetO1() {
-      this.o1 = null;
-    }
-
-    /** Returns true if field o1 is set (has been assigned a value) and false otherwise */
-    public boolean isSetO1() {
-      return this.o1 != null;
-    }
-
-    public void setO1IsSet(boolean value) {
-      if (!value) {
-        this.o1 = null;
-      }
+      this.success = null;
     }
 
-    public TxnAbortedException getO2() {
-      return this.o2;
+    public FireEventResponse getSuccess() {
+      return this.success;
     }
 
-    public void setO2(TxnAbortedException o2) {
-      this.o2 = o2;
+    public void setSuccess(FireEventResponse success) {
+      this.success = success;
     }
 
-    public void unsetO2() {
-      this.o2 = null;
+    public void unsetSuccess() {
+      this.success = null;
     }
 
-    /** Returns true if field o2 is set (has been assigned a value) and false otherwise */
-    public boolean isSetO2() {
-      return this.o2 != null;
+    /** Returns true if field success is set (has been assigned a value) and false otherwise */
+    public boolean isSetSuccess() {
+      return this.success != null;
    }
 
-    public void setO2IsSet(boolean value) {
+    public void setSuccessIsSet(boolean value) {
       if (!value) {
-        this.o2 = null;
+        this.success = null;
       }
     }
 
     public void setFieldValue(_Fields field, Object value) {
       switch (field) {
-      case O1:
-        if (value == null) {
-          unsetO1();
-        } else {
-          setO1((NoSuchTxnException)value);
-        }
-        break;
-
-      case O2:
+      case SUCCESS:
         if (value == null) {
-          unsetO2();
+          unsetSuccess();
         } else {
-          setO2((TxnAbortedException)value);
+          setSuccess((FireEventResponse)value);
         }
         break;
 
@@ -169423,11 +171939,8 @@ public void setFieldValue(_Fields field, Object value) {
 
     public Object getFieldValue(_Fields field) {
       switch (field) {
-      case O1:
-        return getO1();
-
-      case O2:
-        return getO2();
+      case SUCCESS:
        return getSuccess();
 
       }
       throw new IllegalStateException();
@@ -169440,10 +171953,8 @@ public boolean isSet(_Fields field) {
       }
 
       switch (field) {
-      case O1:
-        return isSetO1();
-      case O2:
-        return isSetO2();
+      case SUCCESS:
+        return isSetSuccess();
       }
       throw new IllegalStateException();
     }
@@ -169452,30 +171963,21 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof add_dynamic_partitions_result)
-        return this.equals((add_dynamic_partitions_result)that);
+      if (that instanceof fire_listener_event_result)
+        return this.equals((fire_listener_event_result)that);
       return false;
     }
 
-    public boolean equals(add_dynamic_partitions_result that) {
+    public boolean equals(fire_listener_event_result that) {
       if (that == null)
         return false;
 
-      boolean this_present_o1 = true && this.isSetO1();
-      boolean that_present_o1 = true && that.isSetO1();
-      if (this_present_o1 || that_present_o1) {
-        if (!(this_present_o1 && that_present_o1))
-          return false;
-        if (!this.o1.equals(that.o1))
-          return false;
-      }
-
false; - } - - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) return false; - if (!this.o2.equals(that.o2)) + if (!this.success.equals(that.success)) return false; } @@ -169486,43 +171988,28 @@ public boolean equals(add_dynamic_partitions_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); return list.hashCode(); } @Override - public int compareTo(add_dynamic_partitions_result other) { + public int compareTo(fire_listener_event_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); if (lastComparison != 0) { return lastComparison; } - if (isSetO2()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); if (lastComparison != 0) { return lastComparison; } @@ -169544,22 +172031,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); + StringBuilder sb = new StringBuilder("fire_listener_event_result("); boolean first = true; - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { + sb.append("success:"); + if (this.success == null) { sb.append("null"); } else { - sb.append(this.o2); + sb.append(this.success); } first = false; sb.append(")"); @@ -169569,6 +172048,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -169587,15 +172069,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultStandardScheme getScheme() { - return new add_dynamic_partitions_resultStandardScheme(); + private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { + public fire_listener_event_resultStandardScheme getScheme() { + return new fire_listener_event_resultStandardScheme(); } } - private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme { + private static class fire_listener_event_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -169605,20 +172087,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti break; } switch (schemeField.id) { - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 + case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); + struct.success = new FireEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -169632,18 +172105,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + 
struct.success.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -169652,68 +172120,55 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partit } - private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultTupleScheme getScheme() { - return new add_dynamic_partitions_resultTupleScheme(); + private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { + public fire_listener_event_resultTupleScheme getScheme() { + return new fire_listener_event_resultTupleScheme(); } } - private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme<add_dynamic_partitions_result> { + private static class fire_listener_event_resultTupleScheme extends TupleScheme<fire_listener_event_result> { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetO1()) { - struct.o1.write(oprot); - } - if (struct.isSetO2()) { - struct.o2.write(oprot); + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(1)) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); + struct.success = new FireEventResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); } } } } - public static class get_next_notification_args implements org.apache.thrift.TBase<get_next_notification_args, get_next_notification_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_notification_args> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args"); + public static class flushCache_args implements org.apache.thrift.TBase<flushCache_args, flushCache_args._Fields>, java.io.Serializable, Cloneable, Comparable<flushCache_args> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory()); } - private NotificationEventRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding
and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); +; private static final Map byName = new HashMap(); @@ -169728,8 +172183,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; default: return null; } @@ -169768,86 +172221,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap); - } - - public get_next_notification_args() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap); } - public get_next_notification_args( - NotificationEventRequest rqst) - { - this(); - this.rqst = rqst; + public flushCache_args() { } /** * Performs a deep copy on other. */ - public get_next_notification_args(get_next_notification_args other) { - if (other.isSetRqst()) { - this.rqst = new NotificationEventRequest(other.rqst); - } + public flushCache_args(flushCache_args other) { } - public get_next_notification_args deepCopy() { - return new get_next_notification_args(this); + public flushCache_args deepCopy() { + return new flushCache_args(this); } @Override public void clear() { - this.rqst = null; - } - - public NotificationEventRequest getRqst() { - return this.rqst; - } - - public void setRqst(NotificationEventRequest rqst) { - this.rqst = rqst; - } - - public void unsetRqst() { - this.rqst = null; - } - - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; - } - - public void setRqstIsSet(boolean value) { - if (!value) { - this.rqst = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: - if (value == null) { - unsetRqst(); - } else { - setRqst((NotificationEventRequest)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); - } throw new IllegalStateException(); } @@ -169859,8 +172263,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); } throw new IllegalStateException(); } @@ -169869,24 +172271,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_args) - return this.equals((get_next_notification_args)that); + if (that instanceof flushCache_args) + return this.equals((flushCache_args)that); return false; } - public boolean equals(get_next_notification_args that) { + public boolean equals(flushCache_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && 
that_present_rqst)) - return false; - if (!this.rqst.equals(that.rqst)) - return false; - } - return true; } @@ -169894,32 +172287,17 @@ public boolean equals(get_next_notification_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); - return list.hashCode(); } @Override - public int compareTo(get_next_notification_args other) { + public int compareTo(flushCache_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -169937,16 +172315,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_args("); + StringBuilder sb = new StringBuilder("flushCache_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { - sb.append("null"); - } else { - sb.append(this.rqst); - } - first = false; sb.append(")"); return sb.toString(); } @@ -169954,9 +172325,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -169975,15 +172343,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory { - public get_next_notification_argsStandardScheme getScheme() { - return new get_next_notification_argsStandardScheme(); + private static class flushCache_argsStandardSchemeFactory implements SchemeFactory { + public flushCache_argsStandardScheme getScheme() { + return new flushCache_argsStandardScheme(); } } - private static class get_next_notification_argsStandardScheme extends StandardScheme { + private static class flushCache_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -169993,15 +172361,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati break; } switch (schemeField.id) { - case 1: // RQST - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -170011,72 +172370,51 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public 
void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory { - public get_next_notification_argsTupleScheme getScheme() { - return new get_next_notification_argsTupleScheme(); + private static class flushCache_argsTupleSchemeFactory implements SchemeFactory { + public flushCache_argsTupleScheme getScheme() { + return new flushCache_argsTupleScheme(); } } - private static class get_next_notification_argsTupleScheme extends TupleScheme<get_next_notification_args> { + private static class flushCache_argsTupleScheme extends TupleScheme<flushCache_args> { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } } } } - public static class get_next_notification_result implements org.apache.thrift.TBase<get_next_notification_result, get_next_notification_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_notification_result> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result"); + public static class flushCache_result implements org.apache.thrift.TBase<flushCache_result, flushCache_result._Fields>, java.io.Serializable, Cloneable, Comparable<flushCache_result> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory()); } - private NotificationEventResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - SUCCESS((short)0, "success"); +; private static final Map byName = new HashMap(); @@ -170091,8 +172429,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 0: // SUCCESS - return SUCCESS; default: return null; } @@ -170131,86 +172467,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap); - } - - public get_next_notification_result() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); } - public get_next_notification_result( - NotificationEventResponse success) - { - this(); - this.success = success; + public flushCache_result() { } /** * Performs a deep copy on other. */ - public get_next_notification_result(get_next_notification_result other) { - if (other.isSetSuccess()) { - this.success = new NotificationEventResponse(other.success); - } + public flushCache_result(flushCache_result other) { } - public get_next_notification_result deepCopy() { - return new get_next_notification_result(this); + public flushCache_result deepCopy() { + return new flushCache_result(this); } @Override public void clear() { - this.success = null; - } - - public NotificationEventResponse getSuccess() { - return this.success; - } - - public void setSuccess(NotificationEventResponse success) { - this.success = success; - } - - public void unsetSuccess() { - this.success = null; - } - - /** Returns true if field success is set (has been assigned a value) and false otherwise */ - public boolean isSetSuccess() { - return this.success != null; - } - - public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case SUCCESS: - if (value == null) { - unsetSuccess(); - } else { - setSuccess((NotificationEventResponse)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case SUCCESS: - return getSuccess(); - } throw new IllegalStateException(); } @@ -170222,8 +172509,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case SUCCESS: - return isSetSuccess(); } throw new IllegalStateException(); } @@ -170232,24 +172517,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_result) - return this.equals((get_next_notification_result)that); + if (that instanceof flushCache_result) + return this.equals((flushCache_result)that); return false; } - public boolean equals(get_next_notification_result that) { + public boolean equals(flushCache_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean 
that_present_success = true && that.isSetSuccess(); - if (this_present_success || that_present_success) { - if (!(this_present_success && that_present_success)) - return false; - if (!this.success.equals(that.success)) - return false; - } - return true; } @@ -170257,32 +172533,17 @@ public boolean equals(get_next_notification_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_success = true && (isSetSuccess()); - list.add(present_success); - if (present_success) - list.add(success); - return list.hashCode(); } @Override - public int compareTo(get_next_notification_result other) { + public int compareTo(flushCache_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetSuccess()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -170300,16 +172561,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_result("); + StringBuilder sb = new StringBuilder("flushCache_result("); boolean first = true; - sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } - first = false; sb.append(")"); return sb.toString(); } @@ -170317,9 +172571,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -170338,15 +172589,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory { - public get_next_notification_resultStandardScheme getScheme() { - return new get_next_notification_resultStandardScheme(); + private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { + public flushCache_resultStandardScheme getScheme() { + return new flushCache_resultStandardScheme(); } } - private static class get_next_notification_resultStandardScheme extends StandardScheme { + private static class flushCache_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170356,15 +172607,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati break; } switch (schemeField.id) { - case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new NotificationEventResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -170374,70 +172616,53 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { - oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory { - public get_next_notification_resultTupleScheme getScheme() { - return new get_next_notification_resultTupleScheme(); + private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { + public flushCache_resultTupleScheme getScheme() { + return new flushCache_resultTupleScheme(); } } - private static class get_next_notification_resultTupleScheme extends TupleScheme<get_next_notification_result> { + private static class flushCache_resultTupleScheme extends TupleScheme<flushCache_result> { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetSuccess()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetSuccess()) { - struct.success.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.success = new NotificationEventResponse(); - struct.success.read(iprot); - struct.setSuccessIsSet(true); - } } } } - public static class get_current_notificationEventId_args implements org.apache.thrift.TBase<get_current_notificationEventId_args, get_current_notificationEventId_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_current_notificationEventId_args> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args"); + public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase<get_file_metadata_by_expr_args, get_file_metadata_by_expr_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_file_metadata_by_expr_args> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); } + private GetFileMetadataByExprRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding
and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -170452,6 +172677,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // REQ + return REQ; default: return null; } @@ -170490,37 +172717,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); } - public get_current_notificationEventId_args() { + public get_file_metadata_by_expr_args() { + } + + public get_file_metadata_by_expr_args( + GetFileMetadataByExprRequest req) + { + this(); + this.req = req; } /** * Performs a deep copy on other. */ - public get_current_notificationEventId_args(get_current_notificationEventId_args other) { + public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataByExprRequest(other.req); + } } - public get_current_notificationEventId_args deepCopy() { - return new get_current_notificationEventId_args(this); + public get_file_metadata_by_expr_args deepCopy() { + return new get_file_metadata_by_expr_args(this); } @Override public void clear() { + this.req = null; + } + + public GetFileMetadataByExprRequest getReq() { + return this.req; + } + + public void setReq(GetFileMetadataByExprRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((GetFileMetadataByExprRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case REQ: + return getReq(); + } throw new IllegalStateException(); } @@ -170532,6 +172808,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -170540,15 +172818,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_args) - return this.equals((get_current_notificationEventId_args)that); + if (that instanceof get_file_metadata_by_expr_args) + return this.equals((get_file_metadata_by_expr_args)that); return false; } - public boolean equals(get_current_notificationEventId_args that) { + public boolean equals(get_file_metadata_by_expr_args that) { if (that == null) 
return false; + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + return true; } @@ -170556,17 +172843,32 @@ public boolean equals(get_current_notificationEventId_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + return list.hashCode(); } @Override - public int compareTo(get_current_notificationEventId_args other) { + public int compareTo(get_file_metadata_by_expr_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -170584,9 +172886,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); boolean first = true; + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; sb.append(")"); return sb.toString(); } @@ -170594,6 +172903,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -170612,15 +172924,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsStandardScheme getScheme() { - return new get_current_notificationEventId_argsStandardScheme(); + private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsStandardScheme getScheme() { + return new get_file_metadata_by_expr_argsStandardScheme(); } } - private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170630,6 +172942,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific break; } switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -170639,49 +172960,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsTupleScheme getScheme() { - return new get_current_notificationEventId_argsTupleScheme(); + private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsTupleScheme getScheme() { + return new get_file_metadata_by_expr_argsTupleScheme(); } } - private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme<get_current_notificationEventId_args> { + private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme<get_file_metadata_by_expr_args> { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } } } } - public static class get_current_notificationEventId_result implements org.apache.thrift.TBase<get_current_notificationEventId_result, get_current_notificationEventId_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_current_notificationEventId_result> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result"); + public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase<get_file_metadata_by_expr_result, get_file_metadata_by_expr_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_file_metadata_by_expr_result> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new 
get_file_metadata_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); } - private CurrentNotificationEventId success; // required + private GetFileMetadataByExprResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -170746,16 +173086,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); } - public get_current_notificationEventId_result() { + public get_file_metadata_by_expr_result() { } - public get_current_notificationEventId_result( - CurrentNotificationEventId success) + public get_file_metadata_by_expr_result( + GetFileMetadataByExprResult success) { this(); this.success = success; @@ -170764,14 +173104,14 @@ public get_current_notificationEventId_result( /** * Performs a deep copy on other. */ - public get_current_notificationEventId_result(get_current_notificationEventId_result other) { + public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { if (other.isSetSuccess()) { - this.success = new CurrentNotificationEventId(other.success); + this.success = new GetFileMetadataByExprResult(other.success); } } - public get_current_notificationEventId_result deepCopy() { - return new get_current_notificationEventId_result(this); + public get_file_metadata_by_expr_result deepCopy() { + return new get_file_metadata_by_expr_result(this); } @Override @@ -170779,11 +173119,11 @@ public void clear() { this.success = null; } - public CurrentNotificationEventId getSuccess() { + public GetFileMetadataByExprResult getSuccess() { return this.success; } - public void setSuccess(CurrentNotificationEventId success) { + public void setSuccess(GetFileMetadataByExprResult success) { this.success = success; } @@ -170808,7 +173148,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((CurrentNotificationEventId)value); + setSuccess((GetFileMetadataByExprResult)value); } break; @@ -170841,12 +173181,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_result) - return this.equals((get_current_notificationEventId_result)that); + if (that instanceof get_file_metadata_by_expr_result) + return this.equals((get_file_metadata_by_expr_result)that); return false; } - public boolean equals(get_current_notificationEventId_result that) { + public boolean equals(get_file_metadata_by_expr_result that) { if (that == null) return false; @@ -170875,7 +173215,7 @@ public int hashCode() { } 
@Override - public int compareTo(get_current_notificationEventId_result other) { + public int compareTo(get_file_metadata_by_expr_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -170909,7 +173249,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); boolean first = true; sb.append("success:"); @@ -170947,15 +173287,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultStandardScheme getScheme() { - return new get_current_notificationEventId_resultStandardScheme(); + private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultStandardScheme getScheme() { + return new get_file_metadata_by_expr_resultStandardScheme(); } } - private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170967,7 +173307,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new CurrentNotificationEventId(); + struct.success = new GetFileMetadataByExprResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -170983,7 +173323,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -170998,16 +173338,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notifi } - private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultTupleScheme getScheme() { - return new get_current_notificationEventId_resultTupleScheme(); + private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultTupleScheme getScheme() { + return new get_file_metadata_by_expr_resultTupleScheme(); } } - private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) 
throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -171020,11 +173360,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notific } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new CurrentNotificationEventId(); + struct.success = new GetFileMetadataByExprResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -171033,22 +173373,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notifica } - public static class fire_listener_event_args implements org.apache.thrift.TBase<fire_listener_event_args, fire_listener_event_args._Fields>, java.io.Serializable, Cloneable, Comparable<fire_listener_event_args> { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args"); + public static class get_file_metadata_args implements org.apache.thrift.TBase<get_file_metadata_args, get_file_metadata_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_file_metadata_args> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); } - private FireEventRequest rqst; // required + private GetFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -171063,8 +173403,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -171108,70 +173448,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); } - public fire_listener_event_args() { + public get_file_metadata_args() { } - public fire_listener_event_args( - FireEventRequest rqst) + public get_file_metadata_args( + GetFileMetadataRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. */ - public fire_listener_event_args(fire_listener_event_args other) { - if (other.isSetRqst()) { - this.rqst = new FireEventRequest(other.rqst); + public get_file_metadata_args(get_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataRequest(other.req); } } - public fire_listener_event_args deepCopy() { - return new fire_listener_event_args(this); + public get_file_metadata_args deepCopy() { + return new get_file_metadata_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public FireEventRequest getRqst() { - return this.rqst; + public GetFileMetadataRequest getReq() { + return this.req; } - public void setRqst(FireEventRequest rqst) { - this.rqst = rqst; + public void setReq(GetFileMetadataRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((FireEventRequest)value); + setReq((GetFileMetadataRequest)value); } break; @@ -171180,8 +173520,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case 
REQ: + return getReq(); } throw new IllegalStateException(); @@ -171194,8 +173534,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -171204,21 +173544,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_args) - return this.equals((fire_listener_event_args)that); + if (that instanceof get_file_metadata_args) + return this.equals((get_file_metadata_args)that); return false; } - public boolean equals(fire_listener_event_args that) { + public boolean equals(get_file_metadata_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -171229,28 +173569,28 @@ public boolean equals(fire_listener_event_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(fire_listener_event_args other) { + public int compareTo(get_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -171272,14 +173612,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("fire_listener_event_args(");
+      StringBuilder sb = new StringBuilder("get_file_metadata_args(");
       boolean first = true;
 
-      sb.append("rqst:");
-      if (this.rqst == null) {
+      sb.append("req:");
+      if (this.req == null) {
         sb.append("null");
       } else {
-        sb.append(this.rqst);
+        sb.append(this.req);
       }
       first = false;
       sb.append(")");
@@ -171289,8 +173629,8 @@ public String toString() {
 
     public void validate() throws org.apache.thrift.TException {
       // check for required fields
       // check for sub-struct validity
-      if (rqst != null) {
-        rqst.validate();
+      if (req != null) {
+        req.validate();
       }
     }
 
@@ -171310,15 +173650,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }
 
-    private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory {
-      public fire_listener_event_argsStandardScheme getScheme() {
-        return new fire_listener_event_argsStandardScheme();
+    private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory {
+      public get_file_metadata_argsStandardScheme getScheme() {
+        return new get_file_metadata_argsStandardScheme();
       }
     }
 
-    private static class fire_listener_event_argsStandardScheme extends StandardScheme<fire_listener_event_args> {
+    private static class get_file_metadata_argsStandardScheme extends StandardScheme<get_file_metadata_args> {
 
-      public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -171328,11 +173668,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event
            break;
          }
          switch (schemeField.id) {
-            case 1: // RQST
+            case 1: // REQ
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.rqst = new FireEventRequest();
-                struct.rqst.read(iprot);
-                struct.setRqstIsSet(true);
+                struct.req = new GetFileMetadataRequest();
+                struct.req.read(iprot);
+                struct.setReqIsSet(true);
              } else {
                org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
              }
              break;
@@ -171346,13 +173686,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event
        struct.validate();
      }
 
-      public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException {
        struct.validate();
        oprot.writeStructBegin(STRUCT_DESC);
 
-        if (struct.rqst != null) {
-          oprot.writeFieldBegin(RQST_FIELD_DESC);
-          struct.rqst.write(oprot);
+        if (struct.req != null) {
+          oprot.writeFieldBegin(REQ_FIELD_DESC);
+          struct.req.write(oprot);
          oprot.writeFieldEnd();
        }
        oprot.writeFieldStop();
@@ -171361,53 +173701,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even
 
    }
 
-    private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory {
-      public fire_listener_event_argsTupleScheme getScheme() {
-        return new fire_listener_event_argsTupleScheme();
+    private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory {
+      public get_file_metadata_argsTupleScheme getScheme() {
+        return new get_file_metadata_argsTupleScheme();
      }
    }
 
-    private static class fire_listener_event_argsTupleScheme extends TupleScheme<fire_listener_event_args> {
+    private static class get_file_metadata_argsTupleScheme extends TupleScheme<get_file_metadata_args> {
 
      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
-        if (struct.isSetRqst()) {
+        if (struct.isSetReq()) {
          optionals.set(0);
        }
        oprot.writeBitSet(optionals, 1);
-        if (struct.isSetRqst()) {
-          struct.rqst.write(oprot);
+        if (struct.isSetReq()) {
+          struct.req.write(oprot);
        }
      }
 
      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.rqst = new FireEventRequest();
-          struct.rqst.read(iprot);
-          struct.setRqstIsSet(true);
+          struct.req = new GetFileMetadataRequest();
+          struct.req.read(iprot);
+          struct.setReqIsSet(true);
        }
      }
    }
 
  }
 
-  public static class fire_listener_event_result implements org.apache.thrift.TBase<fire_listener_event_result, fire_listener_event_result._Fields>, java.io.Serializable, Cloneable, Comparable<fire_listener_event_result>   {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result");
+  public static class get_file_metadata_result implements org.apache.thrift.TBase<get_file_metadata_result, get_file_metadata_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_file_metadata_result>   {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result");
 
    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
 
    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
-      schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new fire_listener_event_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory());
    }
 
-    private FireEventResponse success; // required
+    private GetFileMetadataResult success; // required
 
    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -171472,16 +173812,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); } - public fire_listener_event_result() { + public get_file_metadata_result() { } - public fire_listener_event_result( - FireEventResponse success) + public get_file_metadata_result( + GetFileMetadataResult success) { this(); this.success = success; @@ -171490,14 +173830,14 @@ public fire_listener_event_result( /** * Performs a deep copy on other. */ - public fire_listener_event_result(fire_listener_event_result other) { + public get_file_metadata_result(get_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new FireEventResponse(other.success); + this.success = new GetFileMetadataResult(other.success); } } - public fire_listener_event_result deepCopy() { - return new fire_listener_event_result(this); + public get_file_metadata_result deepCopy() { + return new get_file_metadata_result(this); } @Override @@ -171505,11 +173845,11 @@ public void clear() { this.success = null; } - public FireEventResponse getSuccess() { + public GetFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(FireEventResponse success) { + public void setSuccess(GetFileMetadataResult success) { this.success = success; } @@ -171534,7 +173874,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((FireEventResponse)value); + setSuccess((GetFileMetadataResult)value); } break; @@ -171567,12 +173907,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_result) - return this.equals((fire_listener_event_result)that); + if (that instanceof get_file_metadata_result) + return this.equals((get_file_metadata_result)that); return false; } - public boolean equals(fire_listener_event_result that) { + public boolean equals(get_file_metadata_result that) { if (that == null) return false; @@ -171601,7 +173941,7 @@ public int hashCode() { } @Override - public int compareTo(fire_listener_event_result other) { + public int compareTo(get_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -171635,7 +173975,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("fire_listener_event_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -171673,15 +174013,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { - public fire_listener_event_resultStandardScheme getScheme() { - return new fire_listener_event_resultStandardScheme(); + private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_resultStandardScheme getScheme() { + return new get_file_metadata_resultStandardScheme(); } } - private static class fire_listener_event_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -171693,7 +174033,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new FireEventResponse(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -171709,7 +174049,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -171724,16 +174064,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even } - private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { - public fire_listener_event_resultTupleScheme getScheme() { - return new fire_listener_event_resultTupleScheme(); + private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_resultTupleScheme getScheme() { + return new get_file_metadata_resultTupleScheme(); } } - private static class fire_listener_event_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -171746,11 +174086,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new FireEventResponse(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -171759,20 +174099,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_ } - public static class flushCache_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args"); + public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); } + private PutFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -171787,6 +174129,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // REQ + return REQ; default: return null; } @@ -171825,37 +174169,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); } - public flushCache_args() { + public put_file_metadata_args() { + } + + public put_file_metadata_args( + PutFileMetadataRequest req) + { + this(); + this.req = req; } /** * Performs a deep copy on other. 
*/ - public flushCache_args(flushCache_args other) { + public put_file_metadata_args(put_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new PutFileMetadataRequest(other.req); + } } - public flushCache_args deepCopy() { - return new flushCache_args(this); + public put_file_metadata_args deepCopy() { + return new put_file_metadata_args(this); } @Override public void clear() { + this.req = null; + } + + public PutFileMetadataRequest getReq() { + return this.req; + } + + public void setReq(PutFileMetadataRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((PutFileMetadataRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case REQ: + return getReq(); + } throw new IllegalStateException(); } @@ -171867,6 +174260,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -171875,15 +174270,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof flushCache_args) - return this.equals((flushCache_args)that); + if (that instanceof put_file_metadata_args) + return this.equals((put_file_metadata_args)that); return false; } - public boolean equals(flushCache_args that) { + public boolean equals(put_file_metadata_args that) { if (that == null) return false; + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + return true; } @@ -171891,17 +174295,32 @@ public boolean equals(flushCache_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + return list.hashCode(); } @Override - public int compareTo(flushCache_args other) { + public int compareTo(put_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -171919,9 +174338,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("flushCache_args("); + StringBuilder sb = new StringBuilder("put_file_metadata_args("); boolean first = true; + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; sb.append(")"); return sb.toString(); } @@ -171929,6 +174355,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -171947,15 +174376,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class flushCache_argsStandardSchemeFactory implements SchemeFactory { - public flushCache_argsStandardScheme getScheme() { - return new flushCache_argsStandardScheme(); + private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_argsStandardScheme getScheme() { + return new put_file_metadata_argsStandardScheme(); } } - private static class flushCache_argsStandardScheme extends StandardScheme { + private static class put_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -171965,6 +174394,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args str break; } switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -171974,51 +174412,72 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class flushCache_argsTupleSchemeFactory implements SchemeFactory { - public flushCache_argsTupleScheme getScheme() { - return new flushCache_argsTupleScheme(); + private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_argsTupleScheme getScheme() { + return new put_file_metadata_argsTupleScheme(); } } - private static class flushCache_argsTupleScheme extends TupleScheme { + private static class put_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args 
struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } } } } - public static class flushCache_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result"); + public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); } + private PutFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -172033,6 +174492,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -172071,37 +174532,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); } - public flushCache_result() { + public put_file_metadata_result() { + } + + public put_file_metadata_result( + PutFileMetadataResult success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. 
*/ - public flushCache_result(flushCache_result other) { + public put_file_metadata_result(put_file_metadata_result other) { + if (other.isSetSuccess()) { + this.success = new PutFileMetadataResult(other.success); + } } - public flushCache_result deepCopy() { - return new flushCache_result(this); + public put_file_metadata_result deepCopy() { + return new put_file_metadata_result(this); } @Override public void clear() { + this.success = null; + } + + public PutFileMetadataResult getSuccess() { + return this.success; + } + + public void setSuccess(PutFileMetadataResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((PutFileMetadataResult)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + } throw new IllegalStateException(); } @@ -172113,6 +174623,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -172121,15 +174633,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof flushCache_result) - return this.equals((flushCache_result)that); + if (that instanceof put_file_metadata_result) + return this.equals((put_file_metadata_result)that); return false; } - public boolean equals(flushCache_result that) { + public boolean equals(put_file_metadata_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -172137,17 +174658,32 @@ public boolean equals(flushCache_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + return list.hashCode(); } @Override - public int compareTo(flushCache_result other) { + public int compareTo(put_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -172165,9 +174701,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("flushCache_result("); + StringBuilder sb = new StringBuilder("put_file_metadata_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -172175,6 +174718,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -172193,15 +174739,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { - public flushCache_resultStandardScheme getScheme() { - return new flushCache_resultStandardScheme(); + private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_resultStandardScheme getScheme() { + return new put_file_metadata_resultStandardScheme(); } } - private static class flushCache_resultStandardScheme extends StandardScheme { + private static class put_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172211,6 +174757,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new PutFileMetadataResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -172220,49 +174775,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { - public flushCache_resultTupleScheme getScheme() { - return new flushCache_resultTupleScheme(); + private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_resultTupleScheme getScheme() { + return new put_file_metadata_resultTupleScheme(); } } - private static class flushCache_resultTupleScheme extends TupleScheme { + private static class put_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new PutFileMetadataResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } } } } - public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); + public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); } - private GetFileMetadataByExprRequest req; // required + private ClearFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -172327,16 +174901,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); } - public get_file_metadata_by_expr_args() { + public clear_file_metadata_args() { } - public get_file_metadata_by_expr_args( - GetFileMetadataByExprRequest req) + public clear_file_metadata_args( + ClearFileMetadataRequest req) { this(); this.req = req; @@ -172345,14 +174919,14 @@ public get_file_metadata_by_expr_args( /** * Performs a deep copy on other. */ - public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + public clear_file_metadata_args(clear_file_metadata_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataByExprRequest(other.req); + this.req = new ClearFileMetadataRequest(other.req); } } - public get_file_metadata_by_expr_args deepCopy() { - return new get_file_metadata_by_expr_args(this); + public clear_file_metadata_args deepCopy() { + return new clear_file_metadata_args(this); } @Override @@ -172360,11 +174934,11 @@ public void clear() { this.req = null; } - public GetFileMetadataByExprRequest getReq() { + public ClearFileMetadataRequest getReq() { return this.req; } - public void setReq(GetFileMetadataByExprRequest req) { + public void setReq(ClearFileMetadataRequest req) { this.req = req; } @@ -172389,7 +174963,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataByExprRequest)value); + setReq((ClearFileMetadataRequest)value); } break; @@ -172422,12 +174996,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_args) - return this.equals((get_file_metadata_by_expr_args)that); + if (that instanceof clear_file_metadata_args) + return this.equals((clear_file_metadata_args)that); return false; } - public boolean equals(get_file_metadata_by_expr_args that) { + public boolean equals(clear_file_metadata_args that) { if (that == null) return false; @@ -172456,7 +175030,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_args other) { + public int compareTo(clear_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -172490,7 +175064,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); + StringBuilder sb = new StringBuilder("clear_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -172528,15 +175102,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsStandardScheme getScheme() { - return new get_file_metadata_by_expr_argsStandardScheme(); + private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsStandardScheme getScheme() { + return new clear_file_metadata_argsStandardScheme(); } } - private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { + private static class clear_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172548,7 +175122,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new ClearFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -172564,7 +175138,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -172579,16 +175153,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsTupleScheme getScheme() { - return new get_file_metadata_by_expr_argsTupleScheme(); + private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsTupleScheme getScheme() { + return new clear_file_metadata_argsTupleScheme(); } } - private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { + private static class clear_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -172601,11 +175175,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new ClearFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -172614,18 +175188,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); + public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); } - private GetFileMetadataByExprResult success; // required + private ClearFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -172690,16 +175264,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); } - public get_file_metadata_by_expr_result() { + public clear_file_metadata_result() { } - public get_file_metadata_by_expr_result( - GetFileMetadataByExprResult success) + public clear_file_metadata_result( + ClearFileMetadataResult success) { this(); this.success = success; @@ -172708,14 +175282,14 @@ public get_file_metadata_by_expr_result( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { + public clear_file_metadata_result(clear_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataByExprResult(other.success); + this.success = new ClearFileMetadataResult(other.success); } } - public get_file_metadata_by_expr_result deepCopy() { - return new get_file_metadata_by_expr_result(this); + public clear_file_metadata_result deepCopy() { + return new clear_file_metadata_result(this); } @Override @@ -172723,11 +175297,11 @@ public void clear() { this.success = null; } - public GetFileMetadataByExprResult getSuccess() { + public ClearFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataByExprResult success) { + public void setSuccess(ClearFileMetadataResult success) { this.success = success; } @@ -172752,7 +175326,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataByExprResult)value); + setSuccess((ClearFileMetadataResult)value); } break; @@ -172785,12 +175359,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_result) - return this.equals((get_file_metadata_by_expr_result)that); + if (that instanceof clear_file_metadata_result) + return this.equals((clear_file_metadata_result)that); return false; } - public boolean equals(get_file_metadata_by_expr_result that) { + public boolean equals(clear_file_metadata_result that) { if (that == null) return false; @@ -172819,7 +175393,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_result other) { + public int compareTo(clear_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -172853,7 +175427,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); + StringBuilder sb = new StringBuilder("clear_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -172891,15 +175465,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultStandardScheme getScheme() { - return new get_file_metadata_by_expr_resultStandardScheme(); + private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultStandardScheme getScheme() { + return new clear_file_metadata_resultStandardScheme(); } } - private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { + private static class clear_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172911,7 +175485,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -172927,7 +175501,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -172942,16 +175516,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultTupleScheme getScheme() { - return new get_file_metadata_by_expr_resultTupleScheme(); + private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultTupleScheme getScheme() { + return new clear_file_metadata_resultTupleScheme(); } } - private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { + private static class clear_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -172964,11 +175538,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) 
throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -172977,18 +175551,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); + public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); } - private GetFileMetadataRequest req; // required + private CacheFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173053,16 +175627,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); } - public get_file_metadata_args() { + public cache_file_metadata_args() { } - public get_file_metadata_args( - GetFileMetadataRequest req) + public cache_file_metadata_args( + CacheFileMetadataRequest req) { this(); this.req = req; @@ -173071,14 +175645,14 @@ public get_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_args(get_file_metadata_args other) { + public cache_file_metadata_args(cache_file_metadata_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataRequest(other.req); + this.req = new CacheFileMetadataRequest(other.req); } } - public get_file_metadata_args deepCopy() { - return new get_file_metadata_args(this); + public cache_file_metadata_args deepCopy() { + return new cache_file_metadata_args(this); } @Override @@ -173086,11 +175660,11 @@ public void clear() { this.req = null; } - public GetFileMetadataRequest getReq() { + public CacheFileMetadataRequest getReq() { return this.req; } - public void setReq(GetFileMetadataRequest req) { + public void setReq(CacheFileMetadataRequest req) { this.req = req; } @@ -173115,7 +175689,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataRequest)value); + setReq((CacheFileMetadataRequest)value); } break; @@ -173148,12 +175722,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_args) - return this.equals((get_file_metadata_args)that); + if (that instanceof cache_file_metadata_args) + return this.equals((cache_file_metadata_args)that); return false; } - public boolean equals(get_file_metadata_args that) { + public boolean equals(cache_file_metadata_args that) { if (that == null) return false; @@ -173182,7 +175756,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_args other) { + public int compareTo(cache_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173216,7 +175790,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_args("); + StringBuilder sb = new StringBuilder("cache_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -173254,15 +175828,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_argsStandardScheme getScheme() { - return new get_file_metadata_argsStandardScheme(); + private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsStandardScheme getScheme() { + return new cache_file_metadata_argsStandardScheme(); } } - private static class get_file_metadata_argsStandardScheme extends StandardScheme { + private static class cache_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173274,7 +175848,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new GetFileMetadataRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -173290,7 +175864,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -173305,16 +175879,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_argsTupleScheme getScheme() { - return new get_file_metadata_argsTupleScheme(); + private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsTupleScheme getScheme() { + return new cache_file_metadata_argsTupleScheme(); } } - private static class get_file_metadata_argsTupleScheme extends TupleScheme { + private static class cache_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -173327,11 +175901,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot 
= (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new GetFileMetadataRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -173340,18 +175914,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_ar } - public static class get_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result"); + public static class cache_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); } - private GetFileMetadataResult success; // required + private CacheFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173416,16 +175990,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); } - public get_file_metadata_result() { + public cache_file_metadata_result() { } - public get_file_metadata_result( - GetFileMetadataResult success) + public cache_file_metadata_result( + CacheFileMetadataResult success) { this(); this.success = success; @@ -173434,14 +176008,14 @@ public get_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_result(get_file_metadata_result other) { + public cache_file_metadata_result(cache_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataResult(other.success); + this.success = new CacheFileMetadataResult(other.success); } } - public get_file_metadata_result deepCopy() { - return new get_file_metadata_result(this); + public cache_file_metadata_result deepCopy() { + return new cache_file_metadata_result(this); } @Override @@ -173449,11 +176023,11 @@ public void clear() { this.success = null; } - public GetFileMetadataResult getSuccess() { + public CacheFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataResult success) { + public void setSuccess(CacheFileMetadataResult success) { this.success = success; } @@ -173478,7 +176052,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataResult)value); + setSuccess((CacheFileMetadataResult)value); } break; @@ -173511,12 +176085,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_result) - return this.equals((get_file_metadata_result)that); + if (that instanceof cache_file_metadata_result) + return this.equals((cache_file_metadata_result)that); return false; } - public boolean equals(get_file_metadata_result that) { + public boolean equals(cache_file_metadata_result that) { if (that == null) return false; @@ -173545,7 +176119,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_result other) { + public int compareTo(cache_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173579,7 +176153,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_result("); + StringBuilder sb = new StringBuilder("cache_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -173617,15 +176191,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_resultStandardScheme getScheme() { - return new get_file_metadata_resultStandardScheme(); + private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultStandardScheme getScheme() { + return new cache_file_metadata_resultStandardScheme(); } } - private static class get_file_metadata_resultStandardScheme extends StandardScheme { + private static class cache_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173637,7 +176211,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataResult(); + struct.success = new CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -173653,7 +176227,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -173668,16 +176242,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_resultTupleScheme getScheme() { - return new get_file_metadata_resultTupleScheme(); + private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultTupleScheme getScheme() { + return new cache_file_metadata_resultTupleScheme(); } } - private static class get_file_metadata_resultTupleScheme extends TupleScheme { + private static class cache_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -173690,11 +176264,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataResult(); + struct.success = new CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -173703,18 +176277,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_re } - public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); + public static class get_next_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_argsTupleSchemeFactory()); } - private PutFileMetadataRequest req; // required + private GetNextWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173779,16 +176353,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_args.class, metaDataMap); } - public put_file_metadata_args() { + public get_next_write_id_args() { } - public put_file_metadata_args( - PutFileMetadataRequest req) + public get_next_write_id_args( + GetNextWriteIdRequest req) { this(); this.req = req; @@ -173797,14 +176371,14 @@ public put_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_args(put_file_metadata_args other) { + public get_next_write_id_args(get_next_write_id_args other) { if (other.isSetReq()) { - this.req = new PutFileMetadataRequest(other.req); + this.req = new GetNextWriteIdRequest(other.req); } } - public put_file_metadata_args deepCopy() { - return new put_file_metadata_args(this); + public get_next_write_id_args deepCopy() { + return new get_next_write_id_args(this); } @Override @@ -173812,11 +176386,11 @@ public void clear() { this.req = null; } - public PutFileMetadataRequest getReq() { + public GetNextWriteIdRequest getReq() { return this.req; } - public void setReq(PutFileMetadataRequest req) { + public void setReq(GetNextWriteIdRequest req) { this.req = req; } @@ -173841,7 +176415,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((PutFileMetadataRequest)value); + setReq((GetNextWriteIdRequest)value); } break; @@ -173874,12 +176448,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_args) - return this.equals((put_file_metadata_args)that); + if (that instanceof get_next_write_id_args) + return this.equals((get_next_write_id_args)that); return false; } - public boolean equals(put_file_metadata_args that) { + public boolean equals(get_next_write_id_args that) { if (that == null) return false; @@ -173908,7 +176482,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_args other) { + public int compareTo(get_next_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173942,7 +176516,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_args("); + StringBuilder sb = new StringBuilder("get_next_write_id_args("); boolean first = true; sb.append("req:"); @@ -173980,15 +176554,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_argsStandardScheme getScheme() { - return new put_file_metadata_argsStandardScheme(); + private static class get_next_write_id_argsStandardSchemeFactory implements SchemeFactory { + public get_next_write_id_argsStandardScheme getScheme() { + return new get_next_write_id_argsStandardScheme(); } } - private static class put_file_metadata_argsStandardScheme extends StandardScheme { + private static class get_next_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174000,7 +176574,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new PutFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -174016,7 +176590,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174031,16 +176605,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_argsTupleScheme getScheme() { - return new put_file_metadata_argsTupleScheme(); + private static class get_next_write_id_argsTupleSchemeFactory implements SchemeFactory { + public get_next_write_id_argsTupleScheme getScheme() { + return new get_next_write_id_argsTupleScheme(); } } - private static class put_file_metadata_argsTupleScheme extends TupleScheme { + private static class get_next_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -174053,11 +176627,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; 
BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new PutFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -174066,18 +176640,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_ar } - public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); + public static class get_next_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_resultTupleSchemeFactory()); } - private PutFileMetadataResult success; // required + private GetNextWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174142,16 +176716,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_result.class, metaDataMap); } - public put_file_metadata_result() { + public get_next_write_id_result() { } - public put_file_metadata_result( - PutFileMetadataResult success) + public get_next_write_id_result( + GetNextWriteIdResult success) { this(); this.success = success; @@ -174160,14 +176734,14 @@ public put_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_result(put_file_metadata_result other) { + public get_next_write_id_result(get_next_write_id_result other) { if (other.isSetSuccess()) { - this.success = new PutFileMetadataResult(other.success); + this.success = new GetNextWriteIdResult(other.success); } } - public put_file_metadata_result deepCopy() { - return new put_file_metadata_result(this); + public get_next_write_id_result deepCopy() { + return new get_next_write_id_result(this); } @Override @@ -174175,11 +176749,11 @@ public void clear() { this.success = null; } - public PutFileMetadataResult getSuccess() { + public GetNextWriteIdResult getSuccess() { return this.success; } - public void setSuccess(PutFileMetadataResult success) { + public void setSuccess(GetNextWriteIdResult success) { this.success = success; } @@ -174204,7 +176778,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((PutFileMetadataResult)value); + setSuccess((GetNextWriteIdResult)value); } break; @@ -174237,12 +176811,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_result) - return this.equals((put_file_metadata_result)that); + if (that instanceof get_next_write_id_result) + return this.equals((get_next_write_id_result)that); return false; } - public boolean equals(put_file_metadata_result that) { + public boolean equals(get_next_write_id_result that) { if (that == null) return false; @@ -174271,7 +176845,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_result other) { + public int compareTo(get_next_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174305,7 +176879,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_result("); + StringBuilder sb = new StringBuilder("get_next_write_id_result("); boolean first = true; sb.append("success:"); @@ -174343,15 +176917,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_resultStandardScheme getScheme() { - return new put_file_metadata_resultStandardScheme(); + private static class get_next_write_id_resultStandardSchemeFactory implements SchemeFactory { + public get_next_write_id_resultStandardScheme getScheme() { + return new get_next_write_id_resultStandardScheme(); } } - private static class put_file_metadata_resultStandardScheme extends StandardScheme { + private static class get_next_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174363,7 +176937,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new PutFileMetadataResult(); + struct.success = new GetNextWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174379,7 +176953,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174394,16 +176968,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_resultTupleScheme getScheme() { - return new put_file_metadata_resultTupleScheme(); + private static class get_next_write_id_resultTupleSchemeFactory implements SchemeFactory { + public get_next_write_id_resultTupleScheme getScheme() { + return new get_next_write_id_resultTupleScheme(); } } - private static class put_file_metadata_resultTupleScheme extends TupleScheme { + private static class get_next_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174416,11 +176990,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new PutFileMetadataResult(); + struct.success = new GetNextWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174429,18 +177003,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_re } - public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); + public static class finalize_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new finalize_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new finalize_write_id_argsTupleSchemeFactory()); } - private ClearFileMetadataRequest req; // required + private FinalizeWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174505,16 +177079,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_args.class, metaDataMap); } - public clear_file_metadata_args() { + public finalize_write_id_args() { } - public clear_file_metadata_args( - ClearFileMetadataRequest req) + public finalize_write_id_args( + FinalizeWriteIdRequest req) { this(); this.req = req; @@ -174523,14 +177097,14 @@ public clear_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_args(clear_file_metadata_args other) { + public finalize_write_id_args(finalize_write_id_args other) { if (other.isSetReq()) { - this.req = new ClearFileMetadataRequest(other.req); + this.req = new FinalizeWriteIdRequest(other.req); } } - public clear_file_metadata_args deepCopy() { - return new clear_file_metadata_args(this); + public finalize_write_id_args deepCopy() { + return new finalize_write_id_args(this); } @Override @@ -174538,11 +177112,11 @@ public void clear() { this.req = null; } - public ClearFileMetadataRequest getReq() { + public FinalizeWriteIdRequest getReq() { return this.req; } - public void setReq(ClearFileMetadataRequest req) { + public void setReq(FinalizeWriteIdRequest req) { this.req = req; } @@ -174567,7 +177141,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((ClearFileMetadataRequest)value); + setReq((FinalizeWriteIdRequest)value); } break; @@ -174600,12 +177174,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_args) - return this.equals((clear_file_metadata_args)that); + if (that instanceof finalize_write_id_args) + return this.equals((finalize_write_id_args)that); return false; } - public boolean equals(clear_file_metadata_args that) { + public boolean equals(finalize_write_id_args that) { if (that == null) return false; @@ -174634,7 +177208,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_args other) { + public int compareTo(finalize_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174668,7 +177242,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_args("); + StringBuilder sb = new StringBuilder("finalize_write_id_args("); boolean first = true; sb.append("req:"); @@ -174706,15 +177280,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsStandardScheme getScheme() { - return new clear_file_metadata_argsStandardScheme(); + private static class finalize_write_id_argsStandardSchemeFactory implements SchemeFactory { + public finalize_write_id_argsStandardScheme getScheme() { + return new finalize_write_id_argsStandardScheme(); } } - private static class clear_file_metadata_argsStandardScheme extends StandardScheme { + private static class finalize_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174726,7 +177300,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new FinalizeWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -174742,7 +177316,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174757,16 +177331,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsTupleScheme getScheme() { - return new clear_file_metadata_argsTupleScheme(); + private static class finalize_write_id_argsTupleSchemeFactory implements SchemeFactory { + public finalize_write_id_argsTupleScheme getScheme() { + return new finalize_write_id_argsTupleScheme(); } } - private static class clear_file_metadata_argsTupleScheme extends TupleScheme { + private static class finalize_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -174779,11 +177353,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot 
= (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new FinalizeWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -174792,18 +177366,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); + public static class finalize_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new finalize_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new finalize_write_id_resultTupleSchemeFactory()); } - private ClearFileMetadataResult success; // required + private FinalizeWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174868,16 +177442,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_result.class, metaDataMap); } - public clear_file_metadata_result() { + public finalize_write_id_result() { } - public clear_file_metadata_result( - ClearFileMetadataResult success) + public finalize_write_id_result( + FinalizeWriteIdResult success) { this(); this.success = success; @@ -174886,14 +177460,14 @@ public clear_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_result(clear_file_metadata_result other) { + public finalize_write_id_result(finalize_write_id_result other) { if (other.isSetSuccess()) { - this.success = new ClearFileMetadataResult(other.success); + this.success = new FinalizeWriteIdResult(other.success); } } - public clear_file_metadata_result deepCopy() { - return new clear_file_metadata_result(this); + public finalize_write_id_result deepCopy() { + return new finalize_write_id_result(this); } @Override @@ -174901,11 +177475,11 @@ public void clear() { this.success = null; } - public ClearFileMetadataResult getSuccess() { + public FinalizeWriteIdResult getSuccess() { return this.success; } - public void setSuccess(ClearFileMetadataResult success) { + public void setSuccess(FinalizeWriteIdResult success) { this.success = success; } @@ -174930,7 +177504,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((ClearFileMetadataResult)value); + setSuccess((FinalizeWriteIdResult)value); } break; @@ -174963,12 +177537,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_result) - return this.equals((clear_file_metadata_result)that); + if (that instanceof finalize_write_id_result) + return this.equals((finalize_write_id_result)that); return false; } - public boolean equals(clear_file_metadata_result that) { + public boolean equals(finalize_write_id_result that) { if (that == null) return false; @@ -174997,7 +177571,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_result other) { + public int compareTo(finalize_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175031,7 +177605,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_result("); + StringBuilder sb = new StringBuilder("finalize_write_id_result("); boolean first = true; sb.append("success:"); @@ -175069,15 +177643,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultStandardScheme getScheme() { - return new clear_file_metadata_resultStandardScheme(); + private static class finalize_write_id_resultStandardSchemeFactory implements SchemeFactory { + public finalize_write_id_resultStandardScheme getScheme() { + return new finalize_write_id_resultStandardScheme(); } } - private static class clear_file_metadata_resultStandardScheme extends StandardScheme { + private static class finalize_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175089,7 +177663,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ClearFileMetadataResult(); + struct.success = new FinalizeWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -175105,7 +177679,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175120,16 +177694,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultTupleScheme getScheme() { - return new clear_file_metadata_resultTupleScheme(); + private static class finalize_write_id_resultTupleSchemeFactory implements SchemeFactory { + public finalize_write_id_resultTupleScheme getScheme() { + return new finalize_write_id_resultTupleScheme(); } } - private static class clear_file_metadata_resultTupleScheme extends TupleScheme { + private static class finalize_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -175142,11 +177716,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
finalize_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new ClearFileMetadataResult(); + struct.success = new FinalizeWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -175155,18 +177729,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); + public static class heartbeat_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new heartbeat_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_write_id_argsTupleSchemeFactory()); } - private CacheFileMetadataRequest req; // required + private HeartbeatWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175231,16 +177805,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_args.class, metaDataMap); } - public cache_file_metadata_args() { + public heartbeat_write_id_args() { } - public cache_file_metadata_args( - CacheFileMetadataRequest req) + public heartbeat_write_id_args( + HeartbeatWriteIdRequest req) { this(); this.req = req; @@ -175249,14 +177823,14 @@ public cache_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public cache_file_metadata_args(cache_file_metadata_args other) { + public heartbeat_write_id_args(heartbeat_write_id_args other) { if (other.isSetReq()) { - this.req = new CacheFileMetadataRequest(other.req); + this.req = new HeartbeatWriteIdRequest(other.req); } } - public cache_file_metadata_args deepCopy() { - return new cache_file_metadata_args(this); + public heartbeat_write_id_args deepCopy() { + return new heartbeat_write_id_args(this); } @Override @@ -175264,11 +177838,11 @@ public void clear() { this.req = null; } - public CacheFileMetadataRequest getReq() { + public HeartbeatWriteIdRequest getReq() { return this.req; } - public void setReq(CacheFileMetadataRequest req) { + public void setReq(HeartbeatWriteIdRequest req) { this.req = req; } @@ -175293,7 +177867,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((CacheFileMetadataRequest)value); + setReq((HeartbeatWriteIdRequest)value); } break; @@ -175326,12 +177900,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof cache_file_metadata_args) - return this.equals((cache_file_metadata_args)that); + if (that instanceof heartbeat_write_id_args) + return this.equals((heartbeat_write_id_args)that); return false; } - public boolean equals(cache_file_metadata_args that) { + public boolean equals(heartbeat_write_id_args that) { if (that == null) return false; @@ -175360,7 +177934,7 @@ public int hashCode() { } @Override - public int compareTo(cache_file_metadata_args other) { + public int compareTo(heartbeat_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175394,7 +177968,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("cache_file_metadata_args("); + StringBuilder sb = new StringBuilder("heartbeat_write_id_args("); boolean first = true; sb.append("req:"); @@ -175432,15 +178006,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsStandardScheme getScheme() { - return new cache_file_metadata_argsStandardScheme(); + private static class heartbeat_write_id_argsStandardSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsStandardScheme getScheme() { + return new heartbeat_write_id_argsStandardScheme(); } } - private static class cache_file_metadata_argsStandardScheme extends StandardScheme { + private static class heartbeat_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175452,7 +178026,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -175468,7 +178042,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175483,16 +178057,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat } - private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsTupleScheme getScheme() { - return new cache_file_metadata_argsTupleScheme(); + private static class heartbeat_write_id_argsTupleSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsTupleScheme getScheme() { + return new heartbeat_write_id_argsTupleScheme(); } } - private static class cache_file_metadata_argsTupleScheme extends TupleScheme { + private static class heartbeat_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -175505,11 +178079,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { 
TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -175518,18 +178092,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_ } - public static class cache_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); + public static class heartbeat_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new heartbeat_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_write_id_resultTupleSchemeFactory()); } - private CacheFileMetadataResult success; // required + private HeartbeatWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175594,16 +178168,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_result.class, metaDataMap); } - public cache_file_metadata_result() { + public heartbeat_write_id_result() { } - public cache_file_metadata_result( - CacheFileMetadataResult success) + public heartbeat_write_id_result( + HeartbeatWriteIdResult success) { this(); this.success = success; @@ -175612,14 +178186,14 @@ public cache_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public cache_file_metadata_result(cache_file_metadata_result other) { + public heartbeat_write_id_result(heartbeat_write_id_result other) { if (other.isSetSuccess()) { - this.success = new CacheFileMetadataResult(other.success); + this.success = new HeartbeatWriteIdResult(other.success); } } - public cache_file_metadata_result deepCopy() { - return new cache_file_metadata_result(this); + public heartbeat_write_id_result deepCopy() { + return new heartbeat_write_id_result(this); } @Override @@ -175627,11 +178201,11 @@ public void clear() { this.success = null; } - public CacheFileMetadataResult getSuccess() { + public HeartbeatWriteIdResult getSuccess() { return this.success; } - public void setSuccess(CacheFileMetadataResult success) { + public void setSuccess(HeartbeatWriteIdResult success) { this.success = success; } @@ -175656,7 +178230,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((CacheFileMetadataResult)value); + setSuccess((HeartbeatWriteIdResult)value); } break; @@ -175689,12 +178263,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof cache_file_metadata_result) - return this.equals((cache_file_metadata_result)that); + if (that instanceof heartbeat_write_id_result) + return this.equals((heartbeat_write_id_result)that); return false; } - public boolean equals(cache_file_metadata_result that) { + public boolean equals(heartbeat_write_id_result that) { if (that == null) return false; @@ -175723,7 +178297,7 @@ public int hashCode() { } @Override - public int compareTo(cache_file_metadata_result other) { + public int compareTo(heartbeat_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175757,7 +178331,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("cache_file_metadata_result("); + StringBuilder sb = new StringBuilder("heartbeat_write_id_result("); boolean first = true; sb.append("success:"); @@ -175795,15 +178369,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public cache_file_metadata_resultStandardScheme getScheme() { - return new cache_file_metadata_resultStandardScheme(); + private static class heartbeat_write_id_resultStandardSchemeFactory implements SchemeFactory { + public heartbeat_write_id_resultStandardScheme getScheme() { + return new heartbeat_write_id_resultStandardScheme(); } } - private static class cache_file_metadata_resultStandardScheme extends StandardScheme { + private static class heartbeat_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175815,7 +178389,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new CacheFileMetadataResult(); + struct.success = new HeartbeatWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -175831,7 +178405,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175846,16 +178420,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat } - private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public cache_file_metadata_resultTupleScheme getScheme() { - return new cache_file_metadata_resultTupleScheme(); + private static class heartbeat_write_id_resultTupleSchemeFactory implements SchemeFactory { + public heartbeat_write_id_resultTupleScheme getScheme() { + return new heartbeat_write_id_resultTupleScheme(); } } - private static class cache_file_metadata_resultTupleScheme extends TupleScheme { + private static class heartbeat_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -175868,11 +178442,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
heartbeat_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new CacheFileMetadataResult(); + struct.success = new HeartbeatWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index 6a5f550242d4..2503d186a492 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -6,7 +6,34 @@ */ package org.apache.hadoop.hive.metastore.api; - +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) public class hive_metastoreConstants { public static final String DDL_TIME = "transient_lastDdlTime"; @@ -58,4 +85,5 @@ public class hive_metastoreConstants { public static final String TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"; public static final String TABLE_IS_MM = "hivecommit"; + } diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 2d82c92675bd..d228a739c532 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1163,6 +1163,21 @@ public function clear_file_metadata(\metastore\ClearFileMetadataRequest $req); * @return \metastore\CacheFileMetadataResult */ public function cache_file_metadata(\metastore\CacheFileMetadataRequest $req); + /** + * @param \metastore\GetNextWriteIdRequest $req + * @return \metastore\GetNextWriteIdResult + */ + public function get_next_write_id(\metastore\GetNextWriteIdRequest $req); + /** + * @param \metastore\FinalizeWriteIdRequest $req + * @return \metastore\FinalizeWriteIdResult + */ + public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req); + /** + * @param \metastore\HeartbeatWriteIdRequest $req + * @return \metastore\HeartbeatWriteIdResult + */ + public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req); } class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -9641,196 +9656,170 @@ public function recv_cache_file_metadata() throw new \Exception("cache_file_metadata failed: unknown result"); } -} - -// HELPER FUNCTIONS 
AND STRUCTURES + public function get_next_write_id(\metastore\GetNextWriteIdRequest $req) + { + $this->send_get_next_write_id($req); + return $this->recv_get_next_write_id(); + } -class ThriftHiveMetastore_getMetaConf_args { - static $_TSPEC; + public function send_get_next_write_id(\metastore\GetNextWriteIdRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_get_next_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_next_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_next_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } - /** - * @var string - */ - public $key = null; + public function recv_get_next_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_next_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 1 => array( - 'var' => 'key', - 'type' => TType::STRING, - ), - ); - } - if (is_array($vals)) { - if (isset($vals['key'])) { - $this->key = $vals['key']; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_get_next_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("get_next_write_id failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_args'; + public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req) + { + $this->send_finalize_write_id($req); + return $this->recv_finalize_write_id(); } - public function read($input) + public function send_finalize_write_id(\metastore\FinalizeWriteIdRequest $req) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_finalize_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->key); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); + thrift_protocol_write_binary($this->output_, 'finalize_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); } - $xfer += $input->readStructEnd(); - return $xfer; - } - - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); - if ($this->key !== null) { - $xfer += 
$output->writeFieldBegin('key', TType::STRING, 1); - $xfer += $output->writeString($this->key); - $xfer += $output->writeFieldEnd(); + else + { + $this->output_->writeMessageBegin('finalize_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; } -} - -class ThriftHiveMetastore_getMetaConf_result { - static $_TSPEC; - - /** - * @var string - */ - public $success = null; - /** - * @var \metastore\MetaException - */ - public $o1 = null; + public function recv_finalize_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_finalize_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; - public function __construct($vals=null) { - if (!isset(self::$_TSPEC)) { - self::$_TSPEC = array( - 0 => array( - 'var' => 'success', - 'type' => TType::STRING, - ), - 1 => array( - 'var' => 'o1', - 'type' => TType::STRUCT, - 'class' => '\metastore\MetaException', - ), - ); - } - if (is_array($vals)) { - if (isset($vals['success'])) { - $this->success = $vals['success']; - } - if (isset($vals['o1'])) { - $this->o1 = $vals['o1']; + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; } + $result = new \metastore\ThriftHiveMetastore_finalize_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("finalize_write_id failed: unknown result"); } - public function getName() { - return 'ThriftHiveMetastore_getMetaConf_result'; + public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req) + { + $this->send_heartbeat_write_id($req); + return $this->recv_heartbeat_write_id(); } - public function read($input) + public function send_heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req) { - $xfer = 0; - $fname = null; - $ftype = 0; - $fid = 0; - $xfer += $input->readStructBegin($fname); - while (true) + $args = new \metastore\ThriftHiveMetastore_heartbeat_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) { - $xfer += $input->readFieldBegin($fname, $ftype, $fid); - if ($ftype == TType::STOP) { - break; - } - switch ($fid) - { - case 0: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->success); - } else { - $xfer += $input->skip($ftype); - } - break; - case 1: - if ($ftype == TType::STRUCT) { - $this->o1 = new \metastore\MetaException(); - $xfer += $this->o1->read($input); - } else { - $xfer += $input->skip($ftype); - } - break; - default: - $xfer += $input->skip($ftype); - break; - } - $xfer += $input->readFieldEnd(); + thrift_protocol_write_binary($this->output_, 'heartbeat_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('heartbeat_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + 
$this->output_->getTransport()->flush(); } - $xfer += $input->readStructEnd(); - return $xfer; } - public function write($output) { - $xfer = 0; - $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); - if ($this->success !== null) { - $xfer += $output->writeFieldBegin('success', TType::STRING, 0); - $xfer += $output->writeString($this->success); - $xfer += $output->writeFieldEnd(); + public function recv_heartbeat_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_heartbeat_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_heartbeat_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); } - if ($this->o1 !== null) { - $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); - $xfer += $this->o1->write($output); - $xfer += $output->writeFieldEnd(); + if ($result->success !== null) { + return $result->success; } - $xfer += $output->writeFieldStop(); - $xfer += $output->writeStructEnd(); - return $xfer; + throw new \Exception("heartbeat_write_id failed: unknown result"); } } -class ThriftHiveMetastore_setMetaConf_args { +// HELPER FUNCTIONS AND STRUCTURES + +class ThriftHiveMetastore_getMetaConf_args { static $_TSPEC; /** * @var string */ public $key = null; - /** - * @var string - */ - public $value = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -9839,24 +9828,203 @@ public function __construct($vals=null) { 'var' => 'key', 'type' => TType::STRING, ), - 2 => array( - 'var' => 'value', - 'type' => TType::STRING, - ), ); } if (is_array($vals)) { if (isset($vals['key'])) { $this->key = $vals['key']; } - if (isset($vals['value'])) { - $this->value = $vals['value']; - } } } public function getName() { - return 'ThriftHiveMetastore_setMetaConf_args'; + return 'ThriftHiveMetastore_getMetaConf_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->key); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_args'); + if ($this->key !== null) { + $xfer += $output->writeFieldBegin('key', TType::STRING, 1); + $xfer += $output->writeString($this->key); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_getMetaConf_result { + static $_TSPEC; + + /** + * @var string + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if 
(!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRING, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_getMetaConf_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_getMetaConf_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::STRING, 0); + $xfer += $output->writeString($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_setMetaConf_args { + static $_TSPEC; + + /** + * @var string + */ + public $key = null; + /** + * @var string + */ + public $value = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'key', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'value', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['key'])) { + $this->key = $vals['key']; + } + if (isset($vals['value'])) { + $this->value = $vals['value']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_setMetaConf_args'; } public function read($input) @@ -44017,4 +44185,484 @@ public function write($output) { } +class ThriftHiveMetastore_get_next_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\GetNextWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetNextWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\GetNextWriteIdRequest(); + $xfer += $this->req->read($input); + } else 
{ + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_next_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\GetNextWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetNextWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetNextWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_finalize_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\FinalizeWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\FinalizeWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_finalize_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\FinalizeWriteIdRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += 
$input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_finalize_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_finalize_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\FinalizeWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\FinalizeWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_finalize_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\FinalizeWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_finalize_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_heartbeat_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\HeartbeatWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\HeartbeatWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_heartbeat_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\HeartbeatWriteIdRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += 
$input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_heartbeat_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_heartbeat_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\HeartbeatWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\HeartbeatWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_heartbeat_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\HeartbeatWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_heartbeat_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 2f9cc9b7de97..78eb36529e0b 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -4556,6 +4556,14 @@ class Table { * @var bool */ public $temporary = false; + /** + * @var int + */ + public $mmNextWriteId = null; + /** + * @var int + */ + public $mmWatermarkWriteId = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4631,6 +4639,14 @@ public function __construct($vals=null) { 'var' => 'temporary', 'type' => TType::BOOL, ), + 15 => array( + 'var' => 'mmNextWriteId', + 'type' => TType::I64, + ), + 16 => array( + 'var' => 'mmWatermarkWriteId', + 'type' => TType::I64, + ), ); } if (is_array($vals)) { @@ -4676,6 +4692,12 @@ public function __construct($vals=null) { if (isset($vals['temporary'])) { $this->temporary = $vals['temporary']; } + if (isset($vals['mmNextWriteId'])) { + $this->mmNextWriteId = $vals['mmNextWriteId']; + } + if (isset($vals['mmWatermarkWriteId'])) { + $this->mmWatermarkWriteId = 
$vals['mmWatermarkWriteId']; + } } } @@ -4822,6 +4844,20 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->mmNextWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 16: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->mmWatermarkWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -4936,6 +4972,16 @@ public function write($output) { $xfer += $output->writeBool($this->temporary); $xfer += $output->writeFieldEnd(); } + if ($this->mmNextWriteId !== null) { + $xfer += $output->writeFieldBegin('mmNextWriteId', TType::I64, 15); + $xfer += $output->writeI64($this->mmNextWriteId); + $xfer += $output->writeFieldEnd(); + } + if ($this->mmWatermarkWriteId !== null) { + $xfer += $output->writeFieldBegin('mmWatermarkWriteId', TType::I64, 16); + $xfer += $output->writeI64($this->mmWatermarkWriteId); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17394,37 +17440,43 @@ public function write($output) { } -class GetAllFunctionsResponse { +class GetNextWriteIdRequest { static $_TSPEC; /** - * @var \metastore\Function[] + * @var string */ - public $functions = null; + public $dbName = null; + /** + * @var string + */ + public $tblName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'functions', - 'type' => TType::LST, - 'etype' => TType::STRUCT, - 'elem' => array( - 'type' => TType::STRUCT, - 'class' => '\metastore\Function', - ), + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tblName', + 'type' => TType::STRING, ), ); } if (is_array($vals)) { - if (isset($vals['functions'])) { - $this->functions = $vals['functions']; + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; } } } public function getName() { - return 'GetAllFunctionsResponse'; + return 'GetNextWriteIdRequest'; } public function read($input) @@ -17443,19 +17495,15 @@ public function read($input) switch ($fid) { case 1: - if ($ftype == TType::LST) { - $this->functions = array(); - $_size562 = 0; - $_etype565 = 0; - $xfer += $input->readListBegin($_etype565, $_size562); - for ($_i566 = 0; $_i566 < $_size562; ++$_i566) - { - $elem567 = null; - $elem567 = new \metastore\Function(); - $xfer += $elem567->read($input); - $this->functions []= $elem567; - } - $xfer += $input->readListEnd(); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); } else { $xfer += $input->skip($ftype); } @@ -17472,22 +17520,90 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('GetAllFunctionsResponse'); - if ($this->functions !== null) { - if (!is_array($this->functions)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + $xfer += $output->writeStructBegin('GetNextWriteIdRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += 
$output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetNextWriteIdResult { + static $_TSPEC; + + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; } - $xfer += $output->writeFieldBegin('functions', TType::LST, 1); + } + } + + public function getName() { + return 'GetNextWriteIdResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) { - $output->writeListBegin(TType::STRUCT, count($this->functions)); - { - foreach ($this->functions as $iter568) - { - $xfer += $iter568->write($output); + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); } - } - $output->writeListEnd(); + break; + default: + $xfer += $input->skip($ftype); + break; } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetNextWriteIdResult'); + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 1); + $xfer += $output->writeI64($this->writeId); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -17497,7 +17613,7 @@ public function write($output) { } -class TableMeta { +class FinalizeWriteIdRequest { static $_TSPEC; /** @@ -17507,15 +17623,15 @@ class TableMeta { /** * @var string */ - public $tableName = null; + public $tblName = null; /** - * @var string + * @var int */ - public $tableType = null; + public $writeId = null; /** - * @var string + * @var bool */ - public $comments = null; + public $commit = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17525,16 +17641,16 @@ public function __construct($vals=null) { 'type' => TType::STRING, ), 2 => array( - 'var' => 'tableName', + 'var' => 'tblName', 'type' => TType::STRING, ), 3 => array( - 'var' => 'tableType', - 'type' => TType::STRING, + 'var' => 'writeId', + 'type' => TType::I64, ), 4 => array( - 'var' => 'comments', - 'type' => TType::STRING, + 'var' => 'commit', + 'type' => TType::BOOL, ), ); } @@ -17542,20 +17658,20 @@ public function __construct($vals=null) { if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } - if (isset($vals['tableName'])) { - $this->tableName = $vals['tableName']; + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; } - if (isset($vals['tableType'])) { - $this->tableType = $vals['tableType']; + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; } - if (isset($vals['comments'])) { - $this->comments = $vals['comments']; + if (isset($vals['commit'])) { + $this->commit = $vals['commit']; } } } public function getName() { - return 'TableMeta'; + return 'FinalizeWriteIdRequest'; } public function read($input) @@ -17582,21 +17698,21 @@ public function read($input) break; case 2: if ($ftype == TType::STRING) { - $xfer += 
$input->readString($this->tableName); + $xfer += $input->readString($this->tblName); } else { $xfer += $input->skip($ftype); } break; case 3: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tableType); + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); } else { $xfer += $input->skip($ftype); } break; case 4: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->comments); + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->commit); } else { $xfer += $input->skip($ftype); } @@ -17613,25 +17729,25 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('TableMeta'); + $xfer += $output->writeStructBegin('FinalizeWriteIdRequest'); if ($this->dbName !== null) { $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } - if ($this->tableName !== null) { - $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); - $xfer += $output->writeString($this->tableName); + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); $xfer += $output->writeFieldEnd(); } - if ($this->tableType !== null) { - $xfer += $output->writeFieldBegin('tableType', TType::STRING, 3); - $xfer += $output->writeString($this->tableType); + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); + $xfer += $output->writeI64($this->writeId); $xfer += $output->writeFieldEnd(); } - if ($this->comments !== null) { - $xfer += $output->writeFieldBegin('comments', TType::STRING, 4); - $xfer += $output->writeString($this->comments); + if ($this->commit !== null) { + $xfer += $output->writeFieldBegin('commit', TType::BOOL, 4); + $xfer += $output->writeBool($this->commit); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -17641,32 +17757,19 @@ public function write($output) { } -class MetaException extends TException { +class FinalizeWriteIdResult { static $_TSPEC; - /** - * @var string - */ - public $message = null; - public function __construct($vals=null) { + public function __construct() { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'message', - 'type' => TType::STRING, - ), ); } - if (is_array($vals)) { - if (isset($vals['message'])) { - $this->message = $vals['message']; - } - } } public function getName() { - return 'MetaException'; + return 'FinalizeWriteIdResult'; } public function read($input) @@ -17684,13 +17787,6 @@ public function read($input) } switch ($fid) { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->message); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -17703,12 +17799,7 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('MetaException'); - if ($this->message !== null) { - $xfer += $output->writeFieldBegin('message', TType::STRING, 1); - $xfer += $output->writeString($this->message); - $xfer += $output->writeFieldEnd(); - } + $xfer += $output->writeStructBegin('FinalizeWriteIdResult'); $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17716,26 +17807,519 @@ public function write($output) { } -class UnknownTableException extends TException { +class HeartbeatWriteIdRequest { static $_TSPEC; /** * @var string */ - 
public $message = null; + public $dbName = null; + /** + * @var string + */ + public $tblName = null; + /** + * @var int + */ + public $writeId = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'message', + 'var' => 'dbName', 'type' => TType::STRING, ), - ); - } - if (is_array($vals)) { - if (isset($vals['message'])) { - $this->message = $vals['message']; + 2 => array( + 'var' => 'tblName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'HeartbeatWriteIdRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('HeartbeatWriteIdRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class HeartbeatWriteIdResult { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'HeartbeatWriteIdResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('HeartbeatWriteIdResult'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetAllFunctionsResponse { + static $_TSPEC; + + /** + * @var \metastore\Function[] + */ + public $functions = null; + + public function 
__construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'functions', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\Function', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['functions'])) { + $this->functions = $vals['functions']; + } + } + } + + public function getName() { + return 'GetAllFunctionsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->functions = array(); + $_size562 = 0; + $_etype565 = 0; + $xfer += $input->readListBegin($_etype565, $_size562); + for ($_i566 = 0; $_i566 < $_size562; ++$_i566) + { + $elem567 = null; + $elem567 = new \metastore\Function(); + $xfer += $elem567->read($input); + $this->functions []= $elem567; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetAllFunctionsResponse'); + if ($this->functions !== null) { + if (!is_array($this->functions)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('functions', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->functions)); + { + foreach ($this->functions as $iter568) + { + $xfer += $iter568->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TableMeta { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + /** + * @var string + */ + public $tableType = null; + /** + * @var string + */ + public $comments = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'tableType', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'comments', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['tableType'])) { + $this->tableType = $vals['tableType']; + } + if (isset($vals['comments'])) { + $this->comments = $vals['comments']; + } + } + } + + public function getName() { + return 'TableMeta'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableType); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->comments); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TableMeta'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableType !== null) { + $xfer += $output->writeFieldBegin('tableType', TType::STRING, 3); + $xfer += $output->writeString($this->tableType); + $xfer += $output->writeFieldEnd(); + } + if ($this->comments !== null) { + $xfer += $output->writeFieldBegin('comments', TType::STRING, 4); + $xfer += $output->writeString($this->comments); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class MetaException extends TException { + static $_TSPEC; + + /** + * @var string + */ + public $message = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'message', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['message'])) { + $this->message = $vals['message']; + } + } + } + + public function getName() { + return 'MetaException'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->message); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('MetaException'); + if ($this->message !== null) { + $xfer += $output->writeFieldBegin('message', TType::STRING, 1); + $xfer += $output->writeString($this->message); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class UnknownTableException extends TException { + static $_TSPEC; + + /** + * @var string + */ + public $message = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'message', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['message'])) { + $this->message = $vals['message']; } } } @@ -18866,6 +19450,7 @@ final class Constant extends \Thrift\Type\TConstant { static protected $TABLE_IS_TRANSACTIONAL; static protected $TABLE_NO_AUTO_COMPACT; static protected 
$TABLE_TRANSACTIONAL_PROPERTIES; + static protected $TABLE_IS_MM; static protected function init_DDL_TIME() { return "transient_lastDdlTime"; @@ -18962,6 +19547,10 @@ static protected function init_TABLE_NO_AUTO_COMPACT() { static protected function init_TABLE_TRANSACTIONAL_PROPERTIES() { return "transactional_properties"; } + + static protected function init_TABLE_IS_MM() { + return "hivecommit"; + } } diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index c58ccf25dca4..13be322fc008 100755 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -173,6 +173,9 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)') print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') print(' CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)') + print(' GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req)') + print(' FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req)') + print(' HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -1136,6 +1139,24 @@ elif cmd == 'cache_file_metadata': sys.exit(1) pp.pprint(client.cache_file_metadata(eval(args[0]),)) +elif cmd == 'get_next_write_id': + if len(args) != 1: + print('get_next_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.get_next_write_id(eval(args[0]),)) + +elif cmd == 'finalize_write_id': + if len(args) != 1: + print('finalize_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.finalize_write_id(eval(args[0]),)) + +elif cmd == 'heartbeat_write_id': + if len(args) != 1: + print('heartbeat_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.heartbeat_write_id(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 57a748a04c23..137764878219 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1200,6 +1200,27 @@ def cache_file_metadata(self, req): """ pass + def get_next_write_id(self, req): + """ + Parameters: + - req + """ + pass + + def finalize_write_id(self, req): + """ + Parameters: + - req + """ + pass + + def heartbeat_write_id(self, req): + """ + Parameters: + - req + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -6596,6 +6617,99 @@ def recv_cache_file_metadata(self): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result") + def get_next_write_id(self, req): + """ + Parameters: + - req + """ + self.send_get_next_write_id(req) + return self.recv_get_next_write_id() + + def send_get_next_write_id(self, req): + self._oprot.writeMessageBegin('get_next_write_id', TMessageType.CALL, self._seqid) + args = get_next_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_next_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if 
mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_next_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result") + + def finalize_write_id(self, req): + """ + Parameters: + - req + """ + self.send_finalize_write_id(req) + return self.recv_finalize_write_id() + + def send_finalize_write_id(self, req): + self._oprot.writeMessageBegin('finalize_write_id', TMessageType.CALL, self._seqid) + args = finalize_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_finalize_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = finalize_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result") + + def heartbeat_write_id(self, req): + """ + Parameters: + - req + """ + self.send_heartbeat_write_id(req) + return self.recv_heartbeat_write_id() + + def send_heartbeat_write_id(self, req): + self._oprot.writeMessageBegin('heartbeat_write_id', TMessageType.CALL, self._seqid) + args = heartbeat_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_heartbeat_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = heartbeat_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result") + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -6749,6 +6863,9 @@ def __init__(self, handler): self._processMap["put_file_metadata"] = Processor.process_put_file_metadata self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata self._processMap["cache_file_metadata"] = Processor.process_cache_file_metadata + self._processMap["get_next_write_id"] = Processor.process_get_next_write_id + self._processMap["finalize_write_id"] = Processor.process_finalize_write_id + self._processMap["heartbeat_write_id"] = Processor.process_heartbeat_write_id def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -10412,6 +10529,63 @@ def process_cache_file_metadata(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_next_write_id(self, seqid, iprot, oprot): + args = get_next_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_next_write_id_result() + try: + result.success = self._handler.get_next_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + 
oprot.writeMessageBegin("get_next_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_finalize_write_id(self, seqid, iprot, oprot): + args = finalize_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = finalize_write_id_result() + try: + result.success = self._handler.finalize_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("finalize_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_heartbeat_write_id(self, seqid, iprot, oprot): + args = heartbeat_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = heartbeat_write_id_result() + try: + result.success = self._handler.heartbeat_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("heartbeat_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -35698,6 +35872,399 @@ def validate(self): return + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_next_write_id_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (GetNextWriteIdRequest, GetNextWriteIdRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetNextWriteIdRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_next_write_id_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in 
self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_next_write_id_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetNextWriteIdResult, GetNextWriteIdResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetNextWriteIdResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_next_write_id_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class finalize_write_id_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (FinalizeWriteIdRequest, FinalizeWriteIdRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = FinalizeWriteIdRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('finalize_write_id_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = 
(value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class finalize_write_id_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (FinalizeWriteIdResult, FinalizeWriteIdResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = FinalizeWriteIdResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('finalize_write_id_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class heartbeat_write_id_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (HeartbeatWriteIdRequest, HeartbeatWriteIdRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = HeartbeatWriteIdRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('heartbeat_write_id_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + 
oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class heartbeat_write_id_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (HeartbeatWriteIdResult, HeartbeatWriteIdResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = HeartbeatWriteIdResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('heartbeat_write_id_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py index 5100236afa24..6232737eb55d 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py @@ -33,3 +33,4 @@ TABLE_IS_TRANSACTIONAL = "transactional" TABLE_NO_AUTO_COMPACT = "no_auto_compaction" TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties" +TABLE_IS_MM = "hivecommit" diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 2d308c9b0bae..8decc94d783c 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -3122,6 +3122,8 @@ class Table: - tableType - privileges - temporary + - mmNextWriteId + - mmWatermarkWriteId """ thrift_spec = ( @@ -3140,9 +3142,11 @@ class Table: (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 (14, TType.BOOL, 'temporary', None, False, ), # 14 + (15, TType.I64, 'mmNextWriteId', None, None, ), # 15 + (16, TType.I64, 'mmWatermarkWriteId', None, None, ), # 16 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, 
temporary=thrift_spec[14][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], mmNextWriteId=None, mmWatermarkWriteId=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3157,6 +3161,8 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.tableType = tableType self.privileges = privileges self.temporary = temporary + self.mmNextWriteId = mmNextWriteId + self.mmWatermarkWriteId = mmWatermarkWriteId def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3251,6 +3257,16 @@ def read(self, iprot): self.temporary = iprot.readBool() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.mmNextWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.I64: + self.mmWatermarkWriteId = iprot.readI64() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -3324,6 +3340,14 @@ def write(self, oprot): oprot.writeFieldBegin('temporary', TType.BOOL, 14) oprot.writeBool(self.temporary) oprot.writeFieldEnd() + if self.mmNextWriteId is not None: + oprot.writeFieldBegin('mmNextWriteId', TType.I64, 15) + oprot.writeI64(self.mmNextWriteId) + oprot.writeFieldEnd() + if self.mmWatermarkWriteId is not None: + oprot.writeFieldBegin('mmWatermarkWriteId', TType.I64, 16) + oprot.writeI64(self.mmWatermarkWriteId) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3347,6 +3371,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.temporary) + value = (value * 31) ^ hash(self.mmNextWriteId) + value = (value * 31) ^ hash(self.mmWatermarkWriteId) return value def __repr__(self): @@ -12191,6 +12217,456 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class GetNextWriteIdRequest: + """ + Attributes: + - dbName + - tblName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + ) + + def __init__(self, dbName=None, tblName=None,): + self.dbName = dbName + self.tblName = tblName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetNextWriteIdRequest') + if 
self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetNextWriteIdResult: + """ + Attributes: + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'writeId', None, None, ), # 1 + ) + + def __init__(self, writeId=None,): + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetNextWriteIdResult') + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 1) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class FinalizeWriteIdRequest: + """ + Attributes: + - dbName + - tblName + - writeId + - commit + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + (3, TType.I64, 'writeId', None, None, ), # 3 + (4, TType.BOOL, 'commit', None, None, ), # 4 + ) + + def __init__(self, dbName=None, tblName=None, writeId=None, commit=None,): + self.dbName = dbName + self.tblName = tblName + self.writeId = writeId + self.commit = commit + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, 
TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.commit = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('FinalizeWriteIdRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.commit is not None: + oprot.writeFieldBegin('commit', TType.BOOL, 4) + oprot.writeBool(self.commit) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + if self.commit is None: + raise TProtocol.TProtocolException(message='Required field commit is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.commit) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class FinalizeWriteIdResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('FinalizeWriteIdResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class HeartbeatWriteIdRequest: + """ + Attributes: + - dbName + - tblName + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + (3, TType.I64, 'writeId', None, None, ), # 3 + ) + + def __init__(self, dbName=None, tblName=None, writeId=None,): + self.dbName = dbName + self.tblName = tblName + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HeartbeatWriteIdRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class HeartbeatWriteIdResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HeartbeatWriteIdResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetAllFunctionsResponse: """ Attributes: diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb index 6aa7143c76b0..118a54edd179 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb @@ -55,3 +55,5 @@ TABLE_TRANSACTIONAL_PROPERTIES = %q"transactional_properties" +TABLE_IS_MM = %q"hivecommit" + diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index bd94e98fce9d..95f20753bdbf 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -728,6 +728,8 @@ class Table TABLETYPE = 12 PRIVILEGES = 13 TEMPORARY = 14 + MMNEXTWRITEID = 15 + MMWATERMARKWRITEID = 16 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -743,7 +745,9 @@ class Table VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, - TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} + TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}, + MMNEXTWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmNextWriteId', :optional => true}, + MMWATERMARKWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmWatermarkWriteId', :optional => true} } def struct_fields; FIELDS; end @@ -2756,6 +2760,122 @@ def validate ::Thrift::Struct.generate_accessors self end +class GetNextWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless 
@tblName + end + + ::Thrift::Struct.generate_accessors self +end + +class GetNextWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + WRITEID = 1 + + FIELDS = { + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class FinalizeWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + WRITEID = 3 + COMMIT = 4 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}, + COMMIT => {:type => ::Thrift::Types::BOOL, :name => 'commit'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field commit is unset!') if @commit.nil? + end + + ::Thrift::Struct.generate_accessors self +end + +class FinalizeWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class HeartbeatWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + WRITEID = 3 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class HeartbeatWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class GetAllFunctionsResponse include ::Thrift::Struct, ::Thrift::Struct_Union FUNCTIONS = 1 diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 51f65c6e581e..403e07f13d1d 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2484,6 +2484,51 @@ def recv_cache_file_metadata() raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'cache_file_metadata failed: unknown result') end + def get_next_write_id(req) + send_get_next_write_id(req) + return recv_get_next_write_id() + end + + def send_get_next_write_id(req) + send_message('get_next_write_id', 
Get_next_write_id_args, :req => req) + end + + def recv_get_next_write_id() + result = receive_message(Get_next_write_id_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_next_write_id failed: unknown result') + end + + def finalize_write_id(req) + send_finalize_write_id(req) + return recv_finalize_write_id() + end + + def send_finalize_write_id(req) + send_message('finalize_write_id', Finalize_write_id_args, :req => req) + end + + def recv_finalize_write_id() + result = receive_message(Finalize_write_id_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'finalize_write_id failed: unknown result') + end + + def heartbeat_write_id(req) + send_heartbeat_write_id(req) + return recv_heartbeat_write_id() + end + + def send_heartbeat_write_id(req) + send_message('heartbeat_write_id', Heartbeat_write_id_args, :req => req) + end + + def recv_heartbeat_write_id() + result = receive_message(Heartbeat_write_id_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'heartbeat_write_id failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -4330,6 +4375,27 @@ def process_cache_file_metadata(seqid, iprot, oprot) write_result(result, oprot, 'cache_file_metadata', seqid) end + def process_get_next_write_id(seqid, iprot, oprot) + args = read_args(iprot, Get_next_write_id_args) + result = Get_next_write_id_result.new() + result.success = @handler.get_next_write_id(args.req) + write_result(result, oprot, 'get_next_write_id', seqid) + end + + def process_finalize_write_id(seqid, iprot, oprot) + args = read_args(iprot, Finalize_write_id_args) + result = Finalize_write_id_result.new() + result.success = @handler.finalize_write_id(args.req) + write_result(result, oprot, 'finalize_write_id', seqid) + end + + def process_heartbeat_write_id(seqid, iprot, oprot) + args = read_args(iprot, Heartbeat_write_id_args) + result = Heartbeat_write_id_result.new() + result.success = @handler.heartbeat_write_id(args.req) + write_result(result, oprot, 'heartbeat_write_id', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -9930,5 +9996,101 @@ def validate ::Thrift::Struct.generate_accessors self end + class Get_next_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetNextWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_next_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetNextWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Finalize_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::FinalizeWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Finalize_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => 
::Thrift::Types::STRUCT, :name => 'success', :class => ::FinalizeWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Heartbeat_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::HeartbeatWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Heartbeat_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::HeartbeatWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 3f85ca6bbc93..f99bcd2e1bbb 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -48,7 +48,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; @@ -81,6 +80,7 @@ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -181,11 +181,10 @@ protected DateFormat initialValue() { }; }; - /** - * default port on which to start the Hive server - */ public static final String ADMIN = "admin"; public static final String PUBLIC = "public"; + /** MM write states. 
*/ + public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a'; private static HadoopThriftAuthBridge.Server saslServer; private static HiveDelegationTokenManager delegationTokenManager; @@ -1253,13 +1252,7 @@ public Type get_type(final String name) throws MetaException, NoSuchObjectExcept } } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_type", ret != null, ex); } @@ -1302,13 +1295,7 @@ public boolean drop_type(final String name) throws MetaException, NoSuchObjectEx success = getMS().dropType(name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("drop_type", success, ex); } @@ -1863,13 +1850,7 @@ public void drop_table_with_environment_context(final String dbname, final Strin throw new MetaException(e.getMessage()); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("drop_table", success, ex, name); } @@ -1941,7 +1922,7 @@ public List get_table_meta(String dbnames, String tblNames, List get_partitions(final String db_name, final String tbl_nam ret = getMS().getPartitions(db_name, tbl_name, max_parts); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_partitions", ret != null, ex, tbl_name); } @@ -6443,13 +6406,7 @@ public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) ret = getMS().getPrimaryKeys(db_name, tbl_name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_primary_keys", ret != null, ex, tbl_name); } @@ -6473,18 +6430,142 @@ public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws M foreign_db_name, foreign_tbl_name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_foreign_keys", ret != null, ex, foreign_tbl_name); } return new ForeignKeysResponse(ret); } + + private void throwMetaException(Exception e) throws MetaException, + NoSuchObjectException { + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } + + @Override + public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + 
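+    // MM write-id allocation: reserve the id by bumping mmNextWriteId on the table
+    // in one metastore txn, then record the write as open in a second txn below.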
startFunction("get_next_write_id", " : db=" + dbName + " tbl=" + tblName); + Exception ex = null; + long writeId = -1; + // TODO# see TXN about how to handle conflicts + try { + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new NoSuchObjectException(dbName + "." + tblName); + } + writeId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0; + tbl.setMmNextWriteId(writeId + 1); + ms.alterTable(dbName, tblName, tbl); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + // Do a separate txn after we have reserved the number. TODO: If we fail, ignore on read. + ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + ms.createTableWrite(tbl, writeId, MM_WRITE_OPEN, System.currentTimeMillis()); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("get_next_write_id", ex == null, ex, tblName); + } + return new GetNextWriteIdResult(writeId); + } + + @Override + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + long writeId = req.getWriteId(); + boolean commit = req.isCommit(); + startFunction("finalize_write_id", " : db=" + dbName + " tbl=" + tblName + + " writeId=" + writeId + " commit=" + commit); + Exception ex = null; + try { + boolean ok = false; + ms.openTransaction(); + try { + MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId); + tw.setState(String.valueOf(commit ? MM_WRITE_COMMITTED : MM_WRITE_ABORTED)); + ms.updateTableWrite(tw); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("finalize_write_id", ex == null, ex, tblName); + } + return new FinalizeWriteIdResult(); + } + + private void commitOrRollback(RawStore ms, boolean ok) throws MetaException { + if (ok) { + if (!ms.commitTransaction()) throw new MetaException("Failed to commit"); + } else { + ms.rollbackTransaction(); + } + } + + @Override + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) + throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + long writeId = req.getWriteId(); + startFunction("heartbeat_write_id", " : db=" + + dbName + " tbl=" + tblName + " writeId=" + writeId); + Exception ex = null; + try { + boolean ok = false; + ms.openTransaction(); + try { + MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId); + tw.setLastHeartbeat(System.currentTimeMillis()); + ms.updateTableWrite(tw); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("heartbeat_write_id", ex == null, ex, tblName); + } + return new HeartbeatWriteIdResult(); + } + + private MTableWrite getActiveTableWrite(RawStore ms, String dbName, + String tblName, long writeId) throws MetaException { + MTableWrite tw = ms.getTableWrite(dbName, tblName, writeId); + assert tw.getState().length() == 1; + char state = tw.getState().charAt(0); + if (state != MM_WRITE_OPEN) { + throw new MetaException("Invalid write state to finalize: " + state); + } + return tw; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 909d8ebdf743..6bd6d92bce37 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -27,104 +27,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConfUtil; -import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; -import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; -import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; -import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; -import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.CheckLockRequest; -import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; -import org.apache.hadoop.hive.metastore.api.CompactionRequest; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; -import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr; -import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventResponse; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokeType; -import 
org.apache.hadoop.hive.metastore.api.HeartbeatRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; -import org.apache.hadoop.hive.metastore.api.NoSuchLockException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; -import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult; -import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.TableStatsRequest; -import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnOpenException; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.shims.ShimLoader; @@ 
-2484,4 +2387,21 @@ public boolean cacheFileMetadata(
     CacheFileMetadataResult result = client.cache_file_metadata(req);
     return result.isIsSupported();
   }
+
+  @Override
+  public long getNextTableWriteId(String dbName, String tableName) throws TException {
+    return client.get_next_write_id(new GetNextWriteIdRequest(dbName, tableName)).getWriteId();
+  }
+
+  @Override
+  public void finalizeTableWrite(
+      String dbName, String tableName, long writeId, boolean commit) throws TException {
+    client.finalize_write_id(new FinalizeWriteIdRequest(dbName, tableName, writeId, commit));
+  }
+
+  @Override
+  public void heartbeatTableWrite(
+      String dbName, String tableName, long writeId) throws TException {
+    client.heartbeat_write_id(new HeartbeatWriteIdRequest(dbName, tableName, writeId));
+  }
 }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 8dc4e28d1102..f5d611d8d929 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -1619,4 +1619,11 @@ void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
   void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws MetaException,
     NoSuchObjectException, TException;
+
+  long getNextTableWriteId(String dbName, String tableName) throws TException;
+
+  void heartbeatTableWrite(String dbName, String tableName, long writeId) throws TException;
+
+  void finalizeTableWrite(String dbName, String tableName, long writeId,
+      boolean commit) throws TException;
 }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 83a3e394542e..9dc80b1c97cb 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -142,6 +142,7 @@
 import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
 import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
+import org.apache.hadoop.hive.metastore.model.MTableWrite;
 import org.apache.hadoop.hive.metastore.model.MType;
 import org.apache.hadoop.hive.metastore.model.MVersionTable;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
@@ -1053,6 +1054,11 @@ public boolean dropTable(String dbName, String tableName) throws MetaException,
       pm.deletePersistentAll(tabConstraints);
     }
+    List<MTableWrite> tableWrites = listAllTableWrites(dbName, tableName);
+    if (tableWrites != null && tableWrites.size() > 0) {
+      pm.deletePersistentAll(tableWrites);
+    }
+
     preDropStorageDescriptor(tbl.getSd());
     // then remove the table
     pm.deletePersistentAll(tbl);
@@ -1108,7 +1114,33 @@ public boolean dropTable(String dbName, String tableName) throws MetaException,
     return mConstraints;
   }
-@Override
+
+  private List<MTableWrite> listAllTableWrites(String dbName, String tableName) {
+    List<MTableWrite> result = null;
+    Query query = null;
+    boolean success = false;
+    openTransaction();
+    try {
+      String queryStr = "table.tableName == t1 && table.database.name == t2";
+      query = pm.newQuery(MTableWrite.class, queryStr);
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      result = new ArrayList<>((List<MTableWrite>) query.executeWithArray(tableName, dbName));
+      pm.retrieveAll(result);
+      success = true;
+    } finally {
+      if (success) {
+        commitTransaction();
+      } else {
+        rollbackTransaction();
+      }
+      if (query != null) {
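+        // the results were copied and materialized above, so the query can be closed safely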
+        query.closeAll();
+      }
+    }
+    return result;
+  }
+
+  @Override
 public Table getTable(String dbName, String tableName) throws MetaException {
     boolean commited = false;
     Table tbl = null;
@@ -1410,11 +1442,14 @@ private Table convertToTable(MTable mtbl) throws MetaException {
         tableType = TableType.MANAGED_TABLE.toString();
       }
     }
-    return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
+    Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
         .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
         .getRetention(), convertToStorageDescriptor(mtbl.getSd()),
         convertToFieldSchemas(mtbl.getPartitionKeys()),
         convertMap(mtbl.getParameters()), mtbl.getViewOriginalText(),
         mtbl.getViewExpandedText(), tableType);
+    t.setMmNextWriteId(mtbl.getMmNextWriteId());
+    t.setMmWatermarkWriteId(mtbl.getMmWatermarkWriteId());
+    return t;
   }

   private MTable convertToMTable(Table tbl) throws InvalidObjectException,
@@ -1452,7 +1487,8 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException,
         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
         tbl.getViewOriginalText(), tbl.getViewExpandedText(),
-        tableType);
+        tableType, tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : -1,
+        tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1);
   }

   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
@@ -3218,6 +3254,8 @@ public void alterTable(String dbname, String name, Table newTable)
       oldt.setLastAccessTime(newt.getLastAccessTime());
       oldt.setViewOriginalText(newt.getViewOriginalText());
       oldt.setViewExpandedText(newt.getViewExpandedText());
+      oldt.setMmNextWriteId(newt.getMmNextWriteId());
+      oldt.setMmWatermarkWriteId(newt.getMmWatermarkWriteId());

       // commit the changes
       success = commitTransaction();
@@ -8613,4 +8651,76 @@ public void dropConstraint(String dbName, String tableName,
     }
   }

+  @Override
+  public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) {
+    boolean success = false;
+    openTransaction();
+    try {
+      MTable mtbl = getMTable(tbl.getDbName(), tbl.getTableName());
+      MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat);
+      pm.makePersistent(tw);
+      success = true;
+    } finally {
+      if (success) {
+        commitTransaction();
+      } else {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  @Override
+  public void updateTableWrite(MTableWrite tw) {
+    boolean success = false;
+    openTransaction();
+    try {
+      pm.makePersistent(tw);
+      success = true;
+    } finally {
+      if (success) {
+        commitTransaction();
+      } else {
+        rollbackTransaction();
+      }
+    }
+  }
+
+  @Override
+  public MTableWrite getTableWrite(
+      String dbName, String tblName, long writeId) throws MetaException {
+    boolean success = false;
+    Query query = null;
+    try {
+      openTransaction();
+      dbName = HiveStringUtils.normalizeIdentifier(dbName);
+      tblName = HiveStringUtils.normalizeIdentifier(tblName);
+      MTable mtbl = getMTable(dbName, tblName);
+      if (mtbl == null) {
+        success = true;
+        return null;
+      }
+      query = pm.newQuery(MTableWrite.class,
+          "table.tableName == t1 && table.database.name == t2 && writeId == t3");
+      query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3");
+      List<MTableWrite> writes = (List<MTableWrite>) query.execute(tblName, dbName, writeId);
+      pm.retrieveAll(writes);
+      success = true;
+      if (writes == null || writes.isEmpty()) return null;
+      if (writes.size() > 1) {
+        throw new MetaException(
+            "More than one TableWrite for " + dbName + "." + tblName + " and " + writeId);
+      }
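+      // exactly one write record matched the (table, writeId) pair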
+ tblName + " and " + writeId); + } + return writes.get(0); + } finally { + if (success) { + commitTransaction(); + } else { + rollbackTransaction(); + } + if (query != null) { + query.closeAll(); + } + } + } + } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index bbd47b8442d7..c5359cf1c395 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -680,4 +681,10 @@ void createTableWithConstraints(Table tbl, List primaryKeys, void addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; void addForeignKeys(List fks) throws InvalidObjectException, MetaException; + + void updateTableWrite(MTableWrite tw); + + MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException; + + void createTableWrite(Table tbl, long writeId, char state, long heartbeat); } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index c65c7a42eda4..4fbeb9ed6e05 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; @@ -2722,4 +2723,22 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO: WTF? 
} + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public void updateTableWrite(MTableWrite tw) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } } diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java index 2a78ce9c0cd5..51c62e301e29 100644 --- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java @@ -35,6 +35,8 @@ public class MTable { private String viewOriginalText; private String viewExpandedText; private String tableType; + private long mmNextWriteId; + private long mmWatermarkWriteId; public MTable() {} @@ -55,7 +57,8 @@ public MTable() {} public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, int createTime, int lastAccessTime, int retention, List partitionKeys, Map parameters, - String viewOriginalText, String viewExpandedText, String tableType) { + String viewOriginalText, String viewExpandedText, String tableType, long mmNextWriteId, + long mmWatermarkWriteId) { this.tableName = tableName; this.database = database; this.sd = sd; @@ -68,6 +71,8 @@ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, Strin this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; this.tableType = tableType; + this.mmWatermarkWriteId = mmWatermarkWriteId; + this.mmNextWriteId = mmNextWriteId; } /** @@ -237,4 +242,20 @@ public void setTableType(String tableType) { public String getTableType() { return tableType; } + + public long getMmNextWriteId() { + return mmNextWriteId; + } + + public long getMmWatermarkWriteId() { + return mmWatermarkWriteId; + } + + public void setMmNextWriteId(long mmNextWriteId) { + this.mmNextWriteId = mmNextWriteId; + } + + public void setMmWatermarkWriteId(long mmWatermarkWriteId) { + this.mmWatermarkWriteId = mmWatermarkWriteId; + } } diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java new file mode 100644 index 000000000000..a7e5f3e00046 --- /dev/null +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore.model; + +public class MTableWrite { + private MTable table; + private long writeId; + private String state; + private long lastHeartbeat; + + public MTableWrite() {} + + public MTableWrite(MTable table, long writeId, String state, long lastHeartbeat) { + this.table = table; + this.writeId = writeId; + this.state = state; + this.lastHeartbeat = lastHeartbeat; + } + + public MTable getTable() { + return table; + } + + public long getWriteId() { + return writeId; + } + + public String getState() { + return state; + } + + public long getLastHeartbeat() { + return lastHeartbeat; + } + + public void setTable(MTable table) { + this.table = table; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public void setState(String state) { + this.state = state; + } + + public void setLastHeartbeat(long lastHeartbeat) { + this.lastHeartbeat = lastHeartbeat; + } +} diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo index bfd6dddcec33..512655622b96 100644 --- a/metastore/src/model/package.jdo +++ b/metastore/src/model/package.jdo @@ -182,6 +182,12 @@ + + + + + + @@ -1058,6 +1064,29 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 1ea72a0d4e7f..9fffd3ff655b 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -863,4 +864,18 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub } + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + } + + @Override + public void updateTableWrite(MTableWrite tw) { + + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { + return null; + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 3e6acc74a58d..a763085436b4 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -58,6 +58,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -879,6 +880,19 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub } + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + } + + @Override + public 
void updateTableWrite(MTableWrite tw) { + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { + return null; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index 1013f7c07e19..e2777c84dd12 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -233,8 +233,8 @@ private Path getStagingDir(Path inputPath, boolean mkdir) { // Append task specific info to stagingPathName, instead of creating a sub-directory. // This way we don't have to worry about deleting the stagingPathName separately at // end of query execution. - // TODO# HERE - dir = fs.makeQualified(new Path(stagingPathName + "_" + getExecutionPrefix())); + dir = fs.makeQualified(new Path( + stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID())); LOG.debug("Created staging dir = " + dir + " for path = " + inputPath); @@ -820,10 +820,6 @@ public void setSkipTableMasking(boolean skipTableMasking) { this.skipTableMasking = skipTableMasking; } - public String getExecutionPrefix() { - return this.executionId + "-" + TaskRunner.getTaskRunnerID(); - } - public ExplainConfiguration getExplainConfig() { return explainConfig; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index b8a2c5ae7872..6a0143aa5050 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -239,7 +239,7 @@ private void commit(FileSystem fs) throws HiveException { } } if (isMmTable) { - Path manifestPath = new Path(specPath, "_tmp." + getPrefixedTaskId() + MANIFEST_EXTENSION); + Path manifestPath = new Path(specPath, "_tmp." + getMmPrefixedTaskId() + MANIFEST_EXTENSION); Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); try { try (FSDataOutputStream out = fs.create(manifestPath)) { @@ -254,10 +254,6 @@ private void commit(FileSystem fs) throws HiveException { } } - private String getPrefixedTaskId() { - return conf.getExecutionPrefix() + "_" + taskId; - } - private void commitOneOutPath(int idx, FileSystem fs, List commitPaths) throws IOException, HiveException { if ((bDynParts || isSkewedStoredAsSubDirectories) @@ -328,10 +324,10 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT outPaths[filesIdx] = getTaskOutPath(taskId); } else { if (!bDynParts && !isSkewedStoredAsSubDirectories) { - finalPaths[filesIdx] = getFinalPath(getPrefixedTaskId(), specPath, extension); + finalPaths[filesIdx] = getFinalPath(getMmPrefixedTaskId(), specPath, extension); } else { // TODO# wrong! - finalPaths[filesIdx] = getFinalPath(getPrefixedTaskId(), specPath, extension); + finalPaths[filesIdx] = getFinalPath(getMmPrefixedTaskId(), specPath, extension); } outPaths[filesIdx] = finalPaths[filesIdx]; } @@ -725,6 +721,10 @@ protected boolean updateProgress() { } } + private String getMmPrefixedTaskId() { + return AcidUtils.getMmFilePrefix(conf.getMmWriteId()) + taskId; + } + protected Writable recordValue; @@ -1216,16 +1216,16 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success, FileSystem fs = specPath.getFileSystem(hconf); int targetLevel = (dpCtx == null) ? 
1 : dpCtx.getNumDPCols(); if (!success) { - FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse( - specPath, targetLevel, fs, new ExecPrefixPathFilter(conf.getExecutionPrefix())); + FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs, + new ExecPrefixPathFilter(AcidUtils.getMmFilePrefix(conf.getMmWriteId()))); for (FileStatus status : statuses) { Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); tryDelete(fs, status.getPath()); } return; } - FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse( - specPath, targetLevel, fs, new ExecPrefixPathFilter(conf.getExecutionPrefix())); + FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs, + new ExecPrefixPathFilter(AcidUtils.getMmFilePrefix(conf.getMmWriteId()))); if (statuses == null) return; LinkedList results = new LinkedList<>(); List manifests = new ArrayList<>(statuses.length); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index e3646dae63fb..f2389eaf447f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.model.MMasterKey; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; @@ -312,15 +313,28 @@ public int execute(DriverContext driverContext) { checkFileFormats(db, tbd, table); + boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID; + if (tbd.isMmTable()) { + if (tbd.getReplace()) { + // TODO#: would need a list of new files to support. Then, old ones only would need + // to be removed from MS (and FS). Also, per-partition IOW is problematic for + // the prefix case. + throw new HiveException("Replace and MM are not supported"); + } + if (isAcid) { + // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move. + throw new HiveException("ACID and MM are not supported"); + } + } + // Create a data container DataContainer dc = null; if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable()); db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), - work.isSrcLocal(), isSkewedStoredAsDirs(tbd), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, - hasFollowingStatsTask()); + work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(), + tbd.getMmWriteId()); if (work.getOutputs() != null) { work.getOutputs().add(new WriteEntity(table, (tbd.getReplace() ? 
WriteEntity.WriteType.INSERT_OVERWRITE : @@ -376,11 +390,13 @@ private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, TaskInformation ti) throws HiveException, IOException, InvalidOperationException { List partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec()); db.validatePartitionNameCharacters(partVals); - Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + " into " + tbd.getTable()); - db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(), + Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + + " into " + tbd.getTable().getTableName()); + db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getPartitionSpec(), tbd.getReplace(), tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask(), tbd.isMmTable()); + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, + hasFollowingStatsTask(), tbd.getMmWriteId()); Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); if (ti.bucketCols != null || ti.sortCols != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index cda5f39ddaa4..1ef15cbf2f3a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -1082,6 +1082,12 @@ public static boolean isAcidTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } + public static boolean isMmTable(Table table) { + // TODO: perhaps it should be a 3rd value for 'transactional'? + String value = table.getProperty(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } + /** * Sets the acidOperationalProperties in the configuration object argument. 
* @param conf Mutable configuration object @@ -1161,4 +1167,8 @@ public static AcidOperationalProperties getAcidOperationalProperties( } return AcidOperationalProperties.parseString(resultStr); } + + public static String getMmFilePrefix(long mmWriteId) { + return "mm_" + mmWriteId + "_"; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e43c60026bc7..5630392c5cab 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -51,6 +51,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.ConcurrentHashMap; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import javax.jdo.JDODataStoreException; @@ -1469,15 +1470,29 @@ public Database getDatabaseCurrent() throws HiveException { return getDatabase(currentDb); } - public void loadPartition(Path loadPath, String tableName, - Map partSpec, boolean replace, - boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, boolean isMmTable) - throws HiveException { + public void loadSinglePartition(Path loadPath, String tableName, + Map partSpec, boolean replace, boolean inheritTableSpecs, + boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid, + boolean hasFollowingStatsTask, Long mmWriteId) throws HiveException { Table tbl = getTable(tableName); - // TODO# dbl check if table is still mm for consistency + boolean isMmTableWrite = (mmWriteId != null); + Preconditions.checkState(isMmTableWrite == AcidUtils.isMmTable(tbl)); loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs, - isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, isMmTable); + isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId); + if (isMmTableWrite) { + // The assumption behind committing here is that this partition is the only one outputted + commitMmTableWrite(tbl, mmWriteId); + } + } + + + private void commitMmTableWrite(Table tbl, Long mmWriteId) + throws HiveException { + try { + getMSC().finalizeTableWrite(tbl.getDbName(), tbl.getTableName(), mmWriteId, true); + } catch (TException e) { + throw new HiveException(e); + } } /** @@ -1503,9 +1518,8 @@ public void loadPartition(Path loadPath, String tableName, */ public Partition loadPartition(Path loadPath, Table tbl, Map partSpec, boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, boolean isMmTable) + boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long mmWriteId) throws HiveException { - Path tblDataLocationPath = tbl.getDataLocation(); try { Partition oldPart = getPartition(tbl, partSpec, false); @@ -1542,12 +1556,12 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par } else { newPartPath = oldPartPath; } - List newFiles = null, mmFiles = null; - if (isMmTable) { - mmFiles = handleMicromanagedPartition( - loadPath, tbl, replace, oldPart, newPartPath, isAcid); + List newFiles = null; + if (mmWriteId != null) { + Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath); + assert !isAcid && !replace; if (areEventsForDmlNeeded(tbl, oldPart)) { - newFiles = mmFiles; + newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } } else { if (replace || (oldPart == null && !isAcid)) { @@ -1636,21 +1650,9 @@ private boolean 
areEventsForDmlNeeded(Table tbl, Partition oldPart) { return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null; } - - private List handleMicromanagedPartition(Path loadPath, Table tbl, boolean replace, - Partition oldPart, Path newPartPath, boolean isAcid) throws HiveException { - Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath); - if (replace) { - // TODO#: would need a list of new files to support. Then, old ones only would need - // to be removed from MS (and FS). Also, per-partition IOW is problematic for - // the prefix case. - throw new HiveException("Replace and MM are not supported"); - } - if (isAcid) { - // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move. - throw new HiveException("ACID and MM are not supported"); - } + private List listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws HiveException { List newFiles = new ArrayList(); + final String filePrefix = AcidUtils.getMmFilePrefix(mmWriteId); FileStatus[] srcs; FileSystem srcFs; try { @@ -1664,19 +1666,27 @@ private List handleMicromanagedPartition(Path loadPath, Table tbl, boolean LOG.info("No sources specified: " + loadPath); return newFiles; } - + PathFilter subdirFilter = null; + // TODO: just like the move path, we only do one level of recursion. for (FileStatus src : srcs) { if (src.isDirectory()) { + if (subdirFilter == null) { + subdirFilter = new PathFilter() { + @Override + public boolean accept(Path path) { + return path.getName().startsWith(filePrefix); + } + }; + } try { - for (FileStatus srcFile : - srcFs.listStatus(src.getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER)) { + for (FileStatus srcFile : srcFs.listStatus(src.getPath(), subdirFilter)) { newFiles.add(srcFile.getPath()); } } catch (IOException e) { throw new HiveException(e); } - } else { + } else if (src.getPath().getName().startsWith(filePrefix)) { newFiles.add(src.getPath()); } } @@ -1878,7 +1888,7 @@ public Void call() throws Exception { Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName()); Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true, listBucketingEnabled, - false, isAcid, hasFollowingStatsTask, false); // TODO# special case #N + false, isAcid, hasFollowingStatsTask, null); // TODO# special case #N partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { @@ -1910,6 +1920,7 @@ public Void call() throws Exception { for (Future future : futures) { future.get(); } + // TODO# we would commit the txn to metastore here } catch (InterruptedException | ExecutionException e) { LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); //cancel other futures @@ -1959,8 +1970,8 @@ public Void call() throws Exception { * @param isAcid true if this is an ACID based write */ public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, - boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask) - throws HiveException { + boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask, + Long mmWriteId) throws HiveException { List newFiles = null; Table tbl = getTable(tableName); @@ -1968,17 +1979,21 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) { newFiles = Collections.synchronizedList(new ArrayList()); } - if (replace) { - Path tableDest = tbl.getPath(); - 
replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal); - } else { - FileSystem fs; - try { - fs = tbl.getDataLocation().getFileSystem(sessionConf); - copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles); - } catch (IOException e) { - throw new HiveException("addFiles: filesystem error in check phase", e); + if (mmWriteId == null) { + if (replace) { + Path tableDest = tbl.getPath(); + replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal); + } else { + FileSystem fs; + try { + fs = tbl.getDataLocation().getFileSystem(sessionConf); + copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles); + } catch (IOException e) { + throw new HiveException("addFiles: filesystem error in check phase", e); + } } + } else { + newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); @@ -2012,6 +2027,10 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean throw new HiveException(e); } + if (mmWriteId != null) { + commitMmTableWrite(tbl, mmWriteId); + } + fireInsertEvent(tbl, null, newFiles); } @@ -3987,4 +4006,13 @@ public void addForeignKey(List foreignKeyCols) throw new HiveException(e); } } + + + public long getNextTableWriteId(String dbName, String tableName) throws HiveException { + try { + return getMSC().getNextTableWriteId(dbName, tableName); + } catch (Exception e) { + throw new HiveException(e); + } + } }; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 4e44d490ec16..bb7001a68ca5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1305,6 +1305,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // // 2. Constructing a conditional task consisting of a move task and a map reduce task // + // TODO# movetask is created here; handle MM tables MoveWork dummyMv = new MoveWork(null, null, null, new LoadFileDesc(fsInputDesc.getFinalDirName(), finalName, true, null, null), false); MapWork cplan; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java index ee674430689d..e2887fdfd7e4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java @@ -71,6 +71,7 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) Task currTask = (Task) nd; Set> ops = new HashSet<>(); + /* TODO# temporarily disabled; needs to be revisited if (currTask instanceof MapRedTask) { MapRedTask mr = (MapRedTask) currTask; ops.addAll(mr.getWork().getAllOperators()); @@ -84,7 +85,7 @@ public Object dispatch(Node nd, Stack stack, Object...
nodeOutputs) for (BaseWork w : sparkWork.getAllWork()) { ops.addAll(w.getAllOperators()); } - } + }*/ setOrAnnotateStats(ops, physicalContext.getParseContext()); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 9329e00b2f84..d5a6d2e94bf2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1056,6 +1056,7 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ? new HashMap() : partSpec); ltd.setLbCtx(lbCtx); + // TODO# movetask is created here; handle MM tables Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); truncateTask.addDependentTask(moveTsk); @@ -1668,6 +1669,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ? new HashMap() : partSpec); ltd.setLbCtx(lbCtx); + // TODO# movetask is created here; handle MM tables Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mergeTask.addDependentTask(moveTsk); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index d562ddf23f8a..6cefbfcb9686 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -326,6 +326,7 @@ private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPat LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), new TreeMap(), replace); + // TODO# movetask is created here; handle MM tables Task loadTableTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false), conf); copyTask.addDependentTask(loadTableTask); @@ -400,6 +401,7 @@ private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc t Utilities.getTableDesc(table), partSpec.getPartSpec(), true); loadTableWork.setInheritTableSpecs(false); + // TODO# movetask is created here; handle MM tables Task loadPartTask = TaskFactory.get(new MoveWork( getInputs(), getOutputs(), loadTableWork, null, false), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index a49b81306bf5..9c9e6fcccdaa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -270,6 +270,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { loadTableWork.setInheritTableSpecs(false); } + // TODO# movetask is created here; handle MM tables Task childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal), conf); if (rTask != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 4353d3a92cd6..92ad50df244a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -330,14 +330,6 @@ public List getLoadTableWork() { return loadTableWork; } - /** - * @param loadTableWork - * the 
loadTableWork to set - */ - public void setLoadTableWork(List loadTableWork) { - this.loadTableWork = loadTableWork; - } - /** * @return the loadFileWork */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 499530e6c0ba..b550235fc0d2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -267,7 +267,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { private final Map joinContext; private final Map smbMapJoinContext; private final HashMap topToTable; - private final Map fsopToTable; private final List reduceSinkOperatorsAddedByEnforceBucketingSorting; private final HashMap> topToTableProps; private QB qb; @@ -367,7 +366,6 @@ public SemanticAnalyzer(QueryState queryState) throws SemanticException { smbMapJoinContext = new HashMap(); // Must be deterministic order map for consistent q-test output across Java versions topToTable = new LinkedHashMap(); - fsopToTable = new HashMap(); reduceSinkOperatorsAddedByEnforceBucketingSorting = new ArrayList(); topToTableProps = new HashMap>(); destTableId = 1; @@ -426,7 +424,6 @@ protected void reset(boolean clearPartsCache) { opToPartToSkewedPruner.clear(); opToSamplePruner.clear(); nameToSplitSample.clear(); - fsopToTable.clear(); resultSchema = null; createVwDesc = null; viewsExpanded = null; @@ -6547,6 +6544,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) ListBucketingCtx lbCtx = null; Map partSpec = null; boolean isMmTable = false; + Long mmWriteId = null; switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6570,7 +6568,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } boolean isNonNativeTable = dest_tab.isNonNative(); - isMmTable = isMmTable(dest_tab); + isMmTable = AcidUtils.isMmTable(dest_tab); if (isNonNativeTable || isMmTable) { queryTmpdir = dest_path; } else { @@ -6603,9 +6601,14 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) acidOp = getAcidType(table_desc.getOutputFileFormatClass()); checkAcidConstraints(qb, table_desc, dest_tab); } + try { + mmWriteId = getMmWriteId(dest_tab, isMmTable); + } catch (HiveException e) { + throw new SemanticException(e); + } boolean isReplace = !qb.getParseInfo().isInsertIntoTable( dest_tab.getDbName(), dest_tab.getTableName()); - ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, isMmTable); + ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, mmWriteId); ltd.setLbCtx(lbCtx); loadTableWork.add(ltd); } else { @@ -6638,7 +6641,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri() .getAuthority(), partPath.toUri().getPath()); - isMmTable = isMmTable(dest_tab); + isMmTable = AcidUtils.isMmTable(dest_tab); queryTmpdir = isMmTable ? 
dest_path : ctx.getTempDirForPath(dest_path); Utilities.LOG14535.info("createFS for partition specifying " + queryTmpdir + " from " + dest_path); table_desc = Utilities.getTableDesc(dest_tab); @@ -6658,7 +6661,13 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) acidOp = getAcidType(table_desc.getOutputFileFormatClass()); checkAcidConstraints(qb, table_desc, dest_tab); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, isMmTable); + try { + mmWriteId = getMmWriteId(dest_tab, isMmTable); + } catch (HiveException e) { + // Wrap the checked HiveException; this method only declares SemanticException. + throw new SemanticException(e); + } + ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, mmWriteId); ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); ltd.setLbCtx(lbCtx); @@ -6856,13 +6865,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx); } + assert isMmTable == (mmWriteId != null); FileSinkDesc fileSinkDesc = createFileSinkDesc(table_desc, dest_part, dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, - canBeMerged, isMmTable); - if (isMmTable) { - fileSinkDesc.setExecutionPrefix(ctx.getExecutionPrefix()); - } + canBeMerged, mmWriteId); Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( fileSinkDesc, fsRS, input), inputRR); @@ -6876,7 +6883,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) FileSinkOperator fso = (FileSinkOperator) output; fso.getConf().setTable(dest_tab); - fsopToTable.put(fso, dest_tab); // the following code is used to collect column stats when // hive.stats.autogather=true // and it is an insert overwrite or insert into table @@ -6895,10 +6901,10 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) return output; } - private static boolean isMmTable(Table table) { - // TODO: perhaps it should be a 3rd value for 'transactional'? - String value = table.getProperty(hive_metastoreConstants.TABLE_IS_MM); - return value != null && value.equalsIgnoreCase("true"); + private static Long getMmWriteId(Table tbl, boolean isMmTable) throws HiveException { + if (!isMmTable) return null; + // Get the next write ID for this table. We will prefix files with this write ID.
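+ // For example (illustrative values, not from this patch): write ID 3 yields the prefix "mm_3_" + // via AcidUtils.getMmFilePrefix(3), so FileSinkOperator names its outputs "mm_3_" + taskId, and + // the listing/cleanup paths elsewhere in this patch (handleMmTable, listFilesCreatedByQuery) + // match files by that same prefix.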
+ return Hive.get().getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); } private FileSinkDesc createFileSinkDesc(TableDesc table_desc, @@ -6906,7 +6912,7 @@ private FileSinkDesc createFileSinkDesc(TableDesc table_desc, boolean destTableIsAcid, boolean destTableIsTemporary, boolean destTableIsMaterialization, Path queryTmpdir, SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, - RowSchema fsRS, boolean canBeMerged, boolean isMmTable) throws SemanticException { + RowSchema fsRS, boolean canBeMerged, Long mmWriteId) throws SemanticException { FileSinkDesc fileSinkDesc = new FileSinkDesc( queryTmpdir, table_desc, @@ -6919,7 +6925,7 @@ private FileSinkDesc createFileSinkDesc(TableDesc table_desc, rsCtx.getPartnCols(), dpCtx, dest_path, - isMmTable); + mmWriteId); fileSinkDesc.setHiveServerQuery(SessionState.get().isHiveServerQuery()); // If this is an insert, update, or delete on an ACID table then mark that so the diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index fb5ca57420e4..723719da5d21 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -206,6 +206,7 @@ public void compile(final ParseContext pCtx, final List tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mvTask.add(tsk); // Check to see if we are stale'ing any indexes and auto-update them if we want diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index f51999dfeeba..63cc0cc4ea3b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -96,8 +96,7 @@ public enum DPSortState { private transient Table table; private Path destPath; private boolean isHiveServerQuery; - private boolean isMmTable; - private String executionPrefix; + private Long mmWriteId; public FileSinkDesc() { } @@ -109,7 +108,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, final boolean compressed, final int destTableId, final boolean multiFileSpray, final boolean canBeMerged, final int numFiles, final int totalFiles, final ArrayList partitionCols, final DynamicPartitionCtx dpCtx, Path destPath, - boolean isMmTable) { + Long mmWriteId) { this.dirName = dirName; this.tableInfo = tableInfo; @@ -123,7 +122,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, this.dpCtx = dpCtx; this.dpSortState = DPSortState.NONE; this.destPath = destPath; - this.isMmTable = isMmTable; + this.mmWriteId = mmWriteId; } public FileSinkDesc(final Path dirName, final TableDesc tableInfo, @@ -145,7 +144,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, public Object clone() throws CloneNotSupportedException { FileSinkDesc ret = new FileSinkDesc(dirName, tableInfo, compressed, destTableId, multiFileSpray, canBeMerged, numFiles, totalFiles, - partitionCols, dpCtx, destPath, isMmTable); + partitionCols, dpCtx, destPath, mmWriteId); ret.setCompressCodec(compressCodec); ret.setCompressType(compressType); ret.setGatherStats(gatherStats); @@ -159,7 +158,6 @@ public Object clone() throws CloneNotSupportedException { ret.setWriteType(writeType); ret.setTransactionId(txnId); ret.setStatsTmpDir(statsTmpDir); - ret.setExecutionPrefix(executionPrefix); return ret; } @@ -254,7 +252,11 @@ public void setTemporary(boolean temporary) { } 
public boolean isMmTable() { - return isMmTable; + return mmWriteId != null; + } + + public long getMmWriteId() { + return mmWriteId; } public boolean isMaterialization() { @@ -482,12 +484,4 @@ public String getStatsTmpDir() { public void setStatsTmpDir(String statsCollectionTempDir) { this.statsTmpDir = statsCollectionTempDir; } - - public String getExecutionPrefix() { - return this.executionPrefix; - } - - public void setExecutionPrefix(String value) { - this.executionPrefix = value; - } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 3b491978ec56..fc8726c977dd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -42,7 +42,7 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc // Need to remember whether this is an acid compliant operation, and if so whether it is an // insert, update, or delete. private AcidUtils.Operation writeType; - private boolean isMmTable; + private Long mmWriteId; // TODO: the below seems like they should just be combined into partitionDesc private org.apache.hadoop.hive.ql.plan.TableDesc table; @@ -52,10 +52,10 @@ private LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - final AcidUtils.Operation writeType, boolean isMmTable) { + final AcidUtils.Operation writeType, Long mmWriteId) { super(sourcePath); Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); - init(table, partitionSpec, replace, writeType, isMmTable); + init(table, partitionSpec, replace, writeType, mmWriteId); } /** @@ -70,15 +70,14 @@ public LoadTableDesc(final Path sourcePath, final Map partitionSpec, final boolean replace) { // TODO# we assume mm=false here - this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, false); + this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, null); } public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, - final AcidUtils.Operation writeType, boolean isMmTable) { - // TODO# we assume mm=false here - this(sourcePath, table, partitionSpec, true, writeType, isMmTable); + final AcidUtils.Operation writeType, Long mmWriteId) { + this(sourcePath, table, partitionSpec, true, writeType, mmWriteId); } /** @@ -91,22 +90,21 @@ public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec) { // TODO# we assume mm=false here - this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, false); + this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, null); } public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final DynamicPartitionCtx dpCtx, final AcidUtils.Operation writeType, - boolean isReplace, - boolean isMmTable) { + boolean isReplace, Long mmWriteId) { super(sourcePath); Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); this.dpCtx = dpCtx; if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) { - init(table, dpCtx.getPartSpec(), isReplace, writeType, isMmTable); + init(table, dpCtx.getPartSpec(), isReplace, writeType, mmWriteId); } else { - init(table, new 
LinkedHashMap(), isReplace, writeType, isMmTable); + init(table, new LinkedHashMap(), isReplace, writeType, mmWriteId); } } @@ -114,12 +112,12 @@ private void init( final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - AcidUtils.Operation writeType, boolean isMmTable) { + AcidUtils.Operation writeType, Long mmWriteId) { this.table = table; this.partitionSpec = partitionSpec; this.replace = replace; this.writeType = writeType; - this.isMmTable = isMmTable; + this.mmWriteId = mmWriteId; } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -147,7 +145,7 @@ public boolean getReplace() { @Explain(displayName = "micromanaged table") public boolean isMmTable() { - return isMmTable; + return mmWriteId != null; } public void setReplace(boolean replace) { @@ -187,4 +185,8 @@ public void setLbCtx(ListBucketingCtx lbCtx) { public AcidUtils.Operation getWriteType() { return writeType; } + + public Long getMmWriteId() { + return mmWriteId; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index 227b0d2231ca..9f498c7fb88a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -34,7 +34,6 @@ */ @Explain(displayName = "Move Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public class MoveWork implements Serializable { - // TODO# all the places where MoveWork is created need to be handled. private static final long serialVersionUID = 1L; private LoadTableDesc loadTableWork; private LoadFileDesc loadFileWork; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index d3c361189a4a..066d2b6d9a7e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -146,7 +146,7 @@ public class TestExecDriver extends TestCase { db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true); db.createTable(src, cols, null, TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); - db.loadTable(hadoopDataFile[i], src, false, true, false, false, false); + db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null); i++; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index 1c27873877c5..909114c55a38 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -285,7 +285,7 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, partColMap.put(PARTCOL_NAME, null); DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(null, partColMap, "Sunday", 100); //todo: does this need the finalDestination? 
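// Note (illustrative): the last two FileSinkDesc arguments are now the destPath and the MM write id; a null write id means a regular (non-MM) sink, while an MM sink would pass a concrete Long such as 1L.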
- desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null, false); + desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null, null); } else { desc = new FileSinkDesc(basePath, tableDesc, false); } diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index 11259cb1617a..8d19df65500a 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -4,18 +4,30 @@ set hive.exec.dynamic.partition.mode=nonstrict; set hive.fetch.task.conversion=none; set tez.grouping.min-size=1; set tez.grouping.max-size=2; -set hive.tez.auto.reducer.parallelism=false; +set hive.tez.auto.reducer.parallelism=false; + +drop table part_mm; +drop table simple_mm; +drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select key from src limit 3; -insert into table intermediate partition(p='456') select key from src limit 3; -insert into table intermediate partition(p='457') select key from src limit 3; +insert into table intermediate partition(p='455') select key from src limit 2; +insert into table intermediate partition(p='456') select key from src limit 2; -create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); +create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); + +explain insert into table part_mm partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='456') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +select * from part_mm; -explain insert into table simple_mm partition(key_mm='455') select key from intermediate; -insert into table simple_mm partition(key_mm='455') select key from intermediate; +create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); +insert into table simple_mm select key from intermediate; +insert into table simple_mm select key from intermediate; +select * from simple_mm; +drop table part_mm; drop table simple_mm; drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 8f1af4ca7063..f357020d9628 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -1,3 +1,15 @@ +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -6,44 +18,35 @@ POSTHOOK: query: create table intermediate(key int) partitioned by (p int) store POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@intermediate -PREHOOK: query: insert into table intermediate partition(p='455') select key from src limit 3 +PREHOOK: query: 
insert into table intermediate partition(p='455') select key from src limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@intermediate@p=455 -POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 3 +POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=455 POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 3 +PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@intermediate@p=456 -POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 3 +POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='457') select key from src limit 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=457 -POSTHOOK: query: insert into table intermediate partition(p='457') select key from src limit 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=457 -POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@simple_mm -POSTHOOK: query: create table simple_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: Output: default@part_mm +POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@simple_mm -PREHOOK: query: explain insert into table simple_mm partition(key_mm='455') select key from intermediate +POSTHOOK: Output: default@part_mm +PREHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table simple_mm partition(key_mm='455') select key from intermediate +POSTHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -60,19 +63,19 @@ STAGE PLANS: Map Operator Tree: TableScan alias: intermediate - Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column 
stats: NONE File Output Operator compressed: false - Statistics: Num rows: 9 Data size: 108 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.simple_mm + name: default.part_mm Execution mode: llap LLAP IO: all inputs @@ -89,27 +92,133 @@ STAGE PLANS: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.simple_mm + name: default.part_mm micromanaged table: true Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: insert into table simple_mm partition(key_mm='455') select key from intermediate +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from part_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +0 455 +455 455 +0 455 +455 455 +0 455 
+455 455 +0 455 +455 455 +0 456 +455 456 +0 456 +455 456 +PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@simple_mm +POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@simple_mm +PREHOOK: query: insert into table simple_mm select key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 -PREHOOK: Output: default@simple_mm@key_mm=455 -POSTHOOK: query: insert into table simple_mm partition(key_mm='455') select key from intermediate +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 -POSTHOOK: Output: default@simple_mm@key_mm=455 -POSTHOOK: Lineage: simple_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from simple_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +0 +455 +0 +455 +0 +455 +0 +455 +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@part_mm +PREHOOK: Output: default@part_mm +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@part_mm +POSTHOOK: Output: default@part_mm PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@simple_mm From 3e481b4719f49a5c15ce1d745b05392f5d0f2627 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 12 Sep 2016 13:20:59 -0700 Subject: [PATCH 04/24] HIVE-14644 : use metastore information on the read path appropriately (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 158 + itests/pom.xml | 28 - itests/qtest-spark/pom.xml | 54 +- metastore/if/hive_metastore.thrift | 12 + .../upgrade/derby/037-HIVE-14637.derby.sql | 4 +- .../upgrade/derby/hive-schema-2.2.0.derby.sql | 2 +- .../upgrade/mssql/022-HIVE-14637.mssql.sql | 4 +- .../upgrade/mssql/hive-schema-2.2.0.mssql.sql | 4 +- .../upgrade/mysql/037-HIVE-14637.mysql.sql | 4 +- .../upgrade/mysql/hive-schema-2.2.0.mysql.sql | 4 +- 
.../upgrade/oracle/037-HIVE-14637.oracle.sql | 4 +- .../oracle/hive-schema-2.2.0.oracle.sql | 4 +- .../postgres/036-HIVE-14637.postgres.sql | 4 +- .../postgres/hive-schema-2.2.0.postgres.sql | 4 +- .../thrift/gen-cpp/ThriftHiveMetastore.cpp | 2403 ++++++---- .../gen/thrift/gen-cpp/ThriftHiveMetastore.h | 126 + .../ThriftHiveMetastore_server.skeleton.cpp | 5 + .../thrift/gen-cpp/hive_metastore_types.cpp | 533 ++- .../gen/thrift/gen-cpp/hive_metastore_types.h | 115 + .../api/GetAllFunctionsResponse.java | 36 +- .../api/GetValidWriteIdsRequest.java | 490 ++ .../metastore/api/GetValidWriteIdsResult.java | 740 +++ .../metastore/api/ThriftHiveMetastore.java | 4242 ++++++++++------- .../gen-php/metastore/ThriftHiveMetastore.php | 1448 +++--- .../gen/thrift/gen-php/metastore/Types.php | 288 +- .../hive_metastore/ThriftHiveMetastore-remote | 7 + .../hive_metastore/ThriftHiveMetastore.py | 1379 +++--- .../thrift/gen-py/hive_metastore/ttypes.py | 212 +- .../gen/thrift/gen-rb/hive_metastore_types.rb | 44 + .../thrift/gen-rb/thrift_hive_metastore.rb | 54 + .../hadoop/hive/metastore/HiveMetaStore.java | 167 +- .../hive/metastore/HiveMetaStoreClient.java | 6 + .../hive/metastore/IMetaStoreClient.java | 3 + .../hive/metastore/MetaStoreDirectSql.java | 9 +- .../hadoop/hive/metastore/ObjectStore.java | 128 +- .../hadoop/hive/metastore/RawStore.java | 11 + .../hive/metastore/hbase/HBaseStore.java | 27 +- metastore/src/model/package.jdo | 16 +- .../DummyRawStoreControlledCommit.java | 12 + .../DummyRawStoreForJdoConnection.java | 13 +- .../org/apache/hadoop/hive/ql/Driver.java | 38 + .../hadoop/hive/ql/exec/FetchOperator.java | 5 + .../apache/hadoop/hive/ql/exec/FetchTask.java | 5 + .../hadoop/hive/ql/exec/FileSinkOperator.java | 49 +- .../apache/hadoop/hive/ql/exec/MoveTask.java | 14 +- .../apache/hadoop/hive/ql/exec/Utilities.java | 52 +- .../apache/hadoop/hive/ql/io/AcidUtils.java | 4 - .../hadoop/hive/ql/io/HiveInputFormat.java | 59 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 94 +- .../hive/ql/optimizer/GenMapRedUtils.java | 2 +- .../AnnotateRunTimeStatsOptimizer.java | 3 +- .../hadoop/hive/ql/parse/TaskCompiler.java | 2 +- .../apache/hadoop/hive/ql/plan/MapWork.java | 2 + .../apache/hadoop/hive/ql/plan/PlanUtils.java | 14 - .../apache/hadoop/hive/ql/plan/TableDesc.java | 3 +- .../test/queries/clientpositive/mm_current.q | 3 +- .../clientpositive/llap/mm_current.q.out | 8 +- 57 files changed, 8788 insertions(+), 4373 deletions(-) create mode 100644 common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java create mode 100644 metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java new file mode 100644 index 000000000000..b25a72d4b30e --- /dev/null +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.common; + +import java.util.Arrays; +import java.util.HashSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ValidWriteIds { + public static final ValidWriteIds NO_WRITE_IDS = new ValidWriteIds(-1, -1, false, null); + + private static final String MM_PREFIX = "mm"; + + private final static Logger LOG = LoggerFactory.getLogger(ValidWriteIds.class); + + private static final String VALID_WRITEIDS_PREFIX = "hive.valid.write.ids."; + private final long lowWatermark, highWatermark; + private final boolean areIdsValid; + private final HashSet<Long> ids; + private String source = null; + + public ValidWriteIds( + long lowWatermark, long highWatermark, boolean areIdsValid, HashSet<Long> ids) { + this.lowWatermark = lowWatermark; + this.highWatermark = highWatermark; + this.areIdsValid = areIdsValid; + this.ids = ids; + } + + public static ValidWriteIds createFromConf(Configuration conf, String dbName, String tblName) { + return createFromConf(conf, dbName + "." + tblName); + } + + public static ValidWriteIds createFromConf(Configuration conf, String fullTblName) { + String idStr = conf.get(createConfKey(fullTblName), null); + if (idStr == null || idStr.isEmpty()) return null; + return new ValidWriteIds(idStr); + } + + private static String createConfKey(String dbName, String tblName) { + return createConfKey(dbName + "." + tblName); + } + + private static String createConfKey(String fullName) { + return VALID_WRITEIDS_PREFIX + fullName; + } + + private ValidWriteIds(String src) { + // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64 + String[] values = src.split(":"); + highWatermark = Long.parseLong(values[0]); + lowWatermark = Long.parseLong(values[1]); + if (values.length > 2) { + areIdsValid = Long.parseLong(values[2]) > 0; + ids = new HashSet<Long>(); + for (int i = 3; i < values.length; ++i) { + ids.add(Long.parseLong(values[i])); + } + } else { + areIdsValid = false; + ids = null; + } + } + + public void addToConf(Configuration conf, String dbName, String tblName) { + if (source == null) { + source = toString(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Setting " + createConfKey(dbName, tblName) + " => " + source); + } + conf.set(createConfKey(dbName, tblName), source); + } + + public String toString() { + // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64 + StringBuilder buf = new StringBuilder(); + buf.append(highWatermark); + buf.append(':'); + buf.append(lowWatermark); + if (ids != null) { + buf.append(':'); + buf.append(areIdsValid ?
1 : 0); + for (long id : ids) { + buf.append(':'); + buf.append(id); + } + } + return buf.toString(); + } + + public boolean isValid(long writeId) { + if (writeId < 0) throw new RuntimeException("Incorrect write ID " + writeId); + if (writeId <= lowWatermark) return true; + if (writeId >= highWatermark) return false; + return ids != null && (areIdsValid == ids.contains(writeId)); + } + + public boolean isValidInput(Path file) { + String fileName = file.getName(); + String[] parts = fileName.split("_", 3); + if (parts.length < 2 || !MM_PREFIX.equals(parts[0])) { + LOG.info("Ignoring unknown file for a MM table: " + file + + " (" + Arrays.toString(parts) + ")"); + return false; + } + long writeId = -1; + try { + writeId = Long.parseLong(parts[1]); + } catch (NumberFormatException ex) { + LOG.info("Ignoring unknown file for a MM table: " + file + + "; parsing " + parts[1] + " got " + ex.getMessage()); + return false; + } + return isValid(writeId); + } + + public static String getMmFilePrefix(long mmWriteId) { + return MM_PREFIX + "_" + mmWriteId; + } + + + public static class IdPathFilter implements PathFilter { + private final String prefix, tmpPrefix; + private final boolean isMatch; + public IdPathFilter(long writeId, boolean isMatch) { + this.prefix = ValidWriteIds.getMmFilePrefix(writeId); + this.tmpPrefix = "_tmp." + prefix; + this.isMatch = isMatch; + } + + @Override + public boolean accept(Path path) { + String name = path.getName(); + return isMatch == (name.startsWith(prefix) || name.startsWith(tmpPrefix)); + } + } +} \ No newline at end of file
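For illustration only, not part of the patch: a minimal sketch of the intended ValidWriteIds round trip, assuming just the class above plus hadoop-common and slf4j on the classpath. The table name, paths, and write-id values are made up; only the API calls come from the code in this patch.

// Hypothetical driver/task usage (not in this patch): the writer publishes the
// snapshot of committed write IDs into the job conf; readers restore it and
// use it to skip uncommitted mm_<writeId> files.
import java.util.HashSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.common.ValidWriteIds;

public class ValidWriteIdsSketch {
  public static void main(String[] args) {
    // Low watermark 2 (everything <= 2 is committed), high watermark 5
    // (nothing >= 5 is committed); of the IDs in between, only 3 is committed.
    HashSet<Long> ids = new HashSet<Long>();
    ids.add(3L);
    ValidWriteIds snapshot = new ValidWriteIds(2, 5, true, ids);

    // Write side: serializes under the key hive.valid.write.ids.default.simple_mm.
    Configuration conf = new Configuration(false);
    snapshot.addToConf(conf, "default", "simple_mm");

    // Read side: restore the snapshot and test candidate MM files by name.
    ValidWriteIds restored = ValidWriteIds.createFromConf(conf, "default", "simple_mm");
    Path committed = new Path("/warehouse/simple_mm/" + ValidWriteIds.getMmFilePrefix(3) + "_0");
    Path uncommitted = new Path("/warehouse/simple_mm/" + ValidWriteIds.getMmFilePrefix(4) + "_0");
    System.out.println(restored.isValidInput(committed));   // true
    System.out.println(restored.isValidInput(uncommitted)); // false
  }
}

With these values the string stored in the configuration is "5:2:1:3": high watermark, low watermark, a 0/1 validity flag, then the individual IDs, exactly as toString() above builds it.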
diff --git a/itests/pom.xml b/itests/pom.xml index a452db379fb0..8b5b6b79bb3d 100644 --- a/itests/pom.xml +++ b/itests/pom.xml @@ -72,34 +72,6 @@ set -x - /bin/pwd - BASE_DIR=./target - HIVE_ROOT=$BASE_DIR/../../../ - DOWNLOAD_DIR=./../thirdparty - download() { - url=$1; - finalName=$2 - tarName=$(basename $url) - rm -rf $BASE_DIR/$finalName - if [[ ! -f $DOWNLOAD_DIR/$tarName ]] - then - curl -Sso $DOWNLOAD_DIR/$tarName $url - else - local md5File="$tarName".md5sum - curl -Sso $DOWNLOAD_DIR/$md5File "$url".md5sum - cd $DOWNLOAD_DIR - if ! md5sum -c $md5File; then - curl -Sso $DOWNLOAD_DIR/$tarName $url || return 1 - fi - - cd - - fi - tar -zxf $DOWNLOAD_DIR/$tarName -C $BASE_DIR - mv $BASE_DIR/spark-${spark.version}-bin-hadoop2-without-hive $BASE_DIR/$finalName - } - mkdir -p $DOWNLOAD_DIR - download "http://d3jw87u4immizc.cloudfront.net/spark-tarball/spark-${spark.version}-bin-hadoop2-without-hive.tgz" "spark" - cp -f $HIVE_ROOT/data/conf/spark/log4j2.properties $BASE_DIR/spark/conf/ diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml index 1e6c3a261dd3..07282fb395e2 100644 --- a/itests/qtest-spark/pom.xml +++ b/itests/qtest-spark/pom.xml @@ -347,6 +347,38 @@ + + org.codehaus.mojo + build-helper-maven-plugin + ${maven.build-helper.plugin.version} + + + add-test-sources + generate-test-sources + + add-test-source + + + + target/generated-test-sources/java + + + + + + + + + + + spark-test + + + !skipSparkTests + + + + org.apache.maven.plugins maven-antrun-plugin @@ -388,26 +420,8 @@ - - org.codehaus.mojo - build-helper-maven-plugin - ${maven.build-helper.plugin.version} - - - add-test-sources - generate-test-sources - - add-test-source - - - - target/generated-test-sources/java - - - - - - + + diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index 95eee271cff5..0101eabdd73e 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -918,6 +918,17 @@ struct HeartbeatWriteIdRequest { struct HeartbeatWriteIdResult { } +struct GetValidWriteIdsRequest { + 1: required string dbName, + 2: required string tblName +} +struct GetValidWriteIdsResult { + 1: required i64 lowWatermarkId, + 2: required i64 highWatermarkId, + 3: optional bool areIdsValid, + 4: optional list<i64> ids +} + struct GetAllFunctionsResponse { 1: optional list<Function> functions @@ -1470,6 +1481,7 @@ service ThriftHiveMetastore extends fb303.FacebookService GetNextWriteIdResult get_next_write_id(1:GetNextWriteIdRequest req) FinalizeWriteIdResult finalize_write_id(1:FinalizeWriteIdRequest req) HeartbeatWriteIdResult heartbeat_write_id(1:HeartbeatWriteIdRequest req) + GetValidWriteIdsResult get_valid_write_ids(1:GetValidWriteIdsRequest req) } // * Note about the DDL_TIME: When creating or altering a table or a partition,
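The new get_valid_write_ids call returns the same information that ValidWriteIds carries. Below is a hypothetical adapter, assuming only the standard Thrift-generated Java bean for GetValidWriteIdsResult (getLowWatermarkId/getHighWatermarkId, plus the isSet* probes and isAreIdsValid/getIds for the optional fields); no such helper exists in this patch.

import java.util.HashSet;
import org.apache.hadoop.hive.common.ValidWriteIds;
import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult;

public class WriteIdSnapshotAdapter {
  // Convert the metastore answer into the conf-serializable snapshot.
  public static ValidWriteIds fromThrift(GetValidWriteIdsResult res) {
    HashSet<Long> ids = null;
    if (res.isSetIds()) {
      ids = new HashSet<Long>(res.getIds());
    }
    // Treat an unset validity flag as "listed IDs are invalid".
    boolean areIdsValid = res.isSetAreIdsValid() && res.isAreIdsValid();
    return new ValidWriteIds(res.getLowWatermarkId(), res.getHighWatermarkId(),
        areIdsValid, ids);
  }
}

Read-path code along the lines of the Driver.java changes in the diffstat would then call addToConf on the result so that tasks can reconstruct the snapshot, as sketched after the ValidWriteIds class above.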
"APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "MM_WATERMARK_WRITE_ID" BIGINT, "MM_NEXT_WRITE_ID" BIGINT); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); diff --git a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql index bb429357e567..5d6f99f59ae8 100644 --- a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql +++ b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql @@ -1,5 +1,5 @@ -ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID BIGINT NULL; -ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID BIGINT NULL; +ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID BIGINT DEFAULT -1; +ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID BIGINT DEFAULT 0; CREATE TABLE TBL_WRITES ( diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql index 6bd0d8780595..26b2ab3cb2bc 100644 --- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql @@ -359,8 +359,8 @@ CREATE TABLE TBLS TBL_TYPE nvarchar(128) NULL, VIEW_EXPANDED_TEXT text NULL, VIEW_ORIGINAL_TEXT text NULL, - MM_WATERMARK_WRITE_ID BIGINT NULL, - MM_NEXT_WRITE_ID BIGINT NULL + MM_WATERMARK_WRITE_ID BIGINT NULL DEFAULT -1, + MM_NEXT_WRITE_ID BIGINT NULL DEFAULT 0 ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql index 1b740d5694dd..c024584d5a1d 100644 --- a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql +++ b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql @@ -1,5 +1,5 @@ -alter table `TBLS` ADD COLUMN `MM_WATERMARK_WRITE_ID` bigint(20); -alter table `TBLS` ADD COLUMN `MM_NEXT_WRITE_ID` bigint(20); +alter table `TBLS` ADD COLUMN `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1; +alter table `TBLS` ADD COLUMN `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0; CREATE TABLE IF NOT EXISTS `TBL_WRITES` ( diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql index f7ef94886d26..b295950c848f 100644 --- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql @@ -587,8 +587,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` ( `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `VIEW_EXPANDED_TEXT` mediumtext, `VIEW_ORIGINAL_TEXT` 
mediumtext, - `MM_WATERMARK_WRITE_ID` bigint(20), - `MM_NEXT_WRITE_ID` bigint(20), + `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1, + `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0, PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), KEY `TBLS_N50` (`SD_ID`), diff --git a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql index bc5fb6b689e7..9f6dbb234b88 100644 --- a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql +++ b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql @@ -1,5 +1,5 @@ -ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID NUMBER; -ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID NUMBER; +ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1; +ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID NUMBER DEFAULT 0; CREATE TABLE TBL_WRITES ( diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql index 503ce09e0bf6..6972c2066907 100644 --- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql @@ -376,8 +376,8 @@ CREATE TABLE TBLS TBL_TYPE VARCHAR2(128) NULL, VIEW_EXPANDED_TEXT CLOB NULL, VIEW_ORIGINAL_TEXT CLOB NULL, - MM_WATERMARK_WRITE_ID NUMBER NULL, - MM_NEXT_WRITE_ID NUMBER NULL + MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1, + MM_NEXT_WRITE_ID NUMBER DEFAULT 0 ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); diff --git a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql index d94c19d729ad..f153837c459e 100644 --- a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql +++ b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql @@ -1,6 +1,6 @@ -ALTER TABLE "TBLS" ADD COLUMN "MM_WATERMARK_WRITE_ID" bigint; -ALTER TABLE "TBLS" ADD COLUMN "MM_NEXT_WRITE_ID" bigint; +ALTER TABLE "TBLS" ADD COLUMN "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1; +ALTER TABLE "TBLS" ADD COLUMN "MM_NEXT_WRITE_ID" bigint DEFAULT 0; CREATE TABLE "TBL_WRITES" ( diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql index bf1d76960d68..de997d357404 100644 --- a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql @@ -373,8 +373,8 @@ CREATE TABLE "TBLS" ( "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, "VIEW_EXPANDED_TEXT" text, "VIEW_ORIGINAL_TEXT" text, - "MM_WATERMARK_WRITE_ID" bigint, - "MM_NEXT_WRITE_ID" bigint + "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1, + "MM_NEXT_WRITE_ID" bigint DEFAULT 0 ); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 5ed3912f00f5..0b6fe91836c3 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size793; - ::apache::thrift::protocol::TType _etype796; - xfer += iprot->readListBegin(_etype796, _size793); - this->success.resize(_size793); - uint32_t _i797; - for (_i797 = 0; _i797 < _size793; ++_i797) + uint32_t _size803; + ::apache::thrift::protocol::TType _etype806; + xfer += 
iprot->readListBegin(_etype806, _size803); + this->success.resize(_size803); + uint32_t _i807; + for (_i807 = 0; _i807 < _size803; ++_i807) { - xfer += iprot->readString(this->success[_i797]); + xfer += iprot->readString(this->success[_i807]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter798; - for (_iter798 = this->success.begin(); _iter798 != this->success.end(); ++_iter798) + std::vector ::const_iterator _iter808; + for (_iter808 = this->success.begin(); _iter808 != this->success.end(); ++_iter808) { - xfer += oprot->writeString((*_iter798)); + xfer += oprot->writeString((*_iter808)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size799; - ::apache::thrift::protocol::TType _etype802; - xfer += iprot->readListBegin(_etype802, _size799); - (*(this->success)).resize(_size799); - uint32_t _i803; - for (_i803 = 0; _i803 < _size799; ++_i803) + uint32_t _size809; + ::apache::thrift::protocol::TType _etype812; + xfer += iprot->readListBegin(_etype812, _size809); + (*(this->success)).resize(_size809); + uint32_t _i813; + for (_i813 = 0; _i813 < _size809; ++_i813) { - xfer += iprot->readString((*(this->success))[_i803]); + xfer += iprot->readString((*(this->success))[_i813]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size804; - ::apache::thrift::protocol::TType _etype807; - xfer += iprot->readListBegin(_etype807, _size804); - this->success.resize(_size804); - uint32_t _i808; - for (_i808 = 0; _i808 < _size804; ++_i808) + uint32_t _size814; + ::apache::thrift::protocol::TType _etype817; + xfer += iprot->readListBegin(_etype817, _size814); + this->success.resize(_size814); + uint32_t _i818; + for (_i818 = 0; _i818 < _size814; ++_i818) { - xfer += iprot->readString(this->success[_i808]); + xfer += iprot->readString(this->success[_i818]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter809; - for (_iter809 = this->success.begin(); _iter809 != this->success.end(); ++_iter809) + std::vector ::const_iterator _iter819; + for (_iter819 = this->success.begin(); _iter819 != this->success.end(); ++_iter819) { - xfer += oprot->writeString((*_iter809)); + xfer += oprot->writeString((*_iter819)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size810; - ::apache::thrift::protocol::TType _etype813; - xfer += iprot->readListBegin(_etype813, _size810); - (*(this->success)).resize(_size810); - uint32_t _i814; - for (_i814 = 0; _i814 < _size810; 
++_i814) + uint32_t _size820; + ::apache::thrift::protocol::TType _etype823; + xfer += iprot->readListBegin(_etype823, _size820); + (*(this->success)).resize(_size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - xfer += iprot->readString((*(this->success))[_i814]); + xfer += iprot->readString((*(this->success))[_i824]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size815; - ::apache::thrift::protocol::TType _ktype816; - ::apache::thrift::protocol::TType _vtype817; - xfer += iprot->readMapBegin(_ktype816, _vtype817, _size815); - uint32_t _i819; - for (_i819 = 0; _i819 < _size815; ++_i819) + uint32_t _size825; + ::apache::thrift::protocol::TType _ktype826; + ::apache::thrift::protocol::TType _vtype827; + xfer += iprot->readMapBegin(_ktype826, _vtype827, _size825); + uint32_t _i829; + for (_i829 = 0; _i829 < _size825; ++_i829) { - std::string _key820; - xfer += iprot->readString(_key820); - Type& _val821 = this->success[_key820]; - xfer += _val821.read(iprot); + std::string _key830; + xfer += iprot->readString(_key830); + Type& _val831 = this->success[_key830]; + xfer += _val831.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter822; - for (_iter822 = this->success.begin(); _iter822 != this->success.end(); ++_iter822) + std::map ::const_iterator _iter832; + for (_iter832 = this->success.begin(); _iter832 != this->success.end(); ++_iter832) { - xfer += oprot->writeString(_iter822->first); - xfer += _iter822->second.write(oprot); + xfer += oprot->writeString(_iter832->first); + xfer += _iter832->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size823; - ::apache::thrift::protocol::TType _ktype824; - ::apache::thrift::protocol::TType _vtype825; - xfer += iprot->readMapBegin(_ktype824, _vtype825, _size823); - uint32_t _i827; - for (_i827 = 0; _i827 < _size823; ++_i827) + uint32_t _size833; + ::apache::thrift::protocol::TType _ktype834; + ::apache::thrift::protocol::TType _vtype835; + xfer += iprot->readMapBegin(_ktype834, _vtype835, _size833); + uint32_t _i837; + for (_i837 = 0; _i837 < _size833; ++_i837) { - std::string _key828; - xfer += iprot->readString(_key828); - Type& _val829 = (*(this->success))[_key828]; - xfer += _val829.read(iprot); + std::string _key838; + xfer += iprot->readString(_key838); + Type& _val839 = (*(this->success))[_key838]; + xfer += _val839.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size830; - ::apache::thrift::protocol::TType _etype833; - xfer += iprot->readListBegin(_etype833, _size830); - this->success.resize(_size830); - uint32_t _i834; - for (_i834 = 0; _i834 < _size830; ++_i834) + uint32_t _size840; + 
::apache::thrift::protocol::TType _etype843; + xfer += iprot->readListBegin(_etype843, _size840); + this->success.resize(_size840); + uint32_t _i844; + for (_i844 = 0; _i844 < _size840; ++_i844) { - xfer += this->success[_i834].read(iprot); + xfer += this->success[_i844].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter835; - for (_iter835 = this->success.begin(); _iter835 != this->success.end(); ++_iter835) + std::vector ::const_iterator _iter845; + for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) { - xfer += (*_iter835).write(oprot); + xfer += (*_iter845).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size836; - ::apache::thrift::protocol::TType _etype839; - xfer += iprot->readListBegin(_etype839, _size836); - (*(this->success)).resize(_size836); - uint32_t _i840; - for (_i840 = 0; _i840 < _size836; ++_i840) + uint32_t _size846; + ::apache::thrift::protocol::TType _etype849; + xfer += iprot->readListBegin(_etype849, _size846); + (*(this->success)).resize(_size846); + uint32_t _i850; + for (_i850 = 0; _i850 < _size846; ++_i850) { - xfer += (*(this->success))[_i840].read(iprot); + xfer += (*(this->success))[_i850].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size841; - ::apache::thrift::protocol::TType _etype844; - xfer += iprot->readListBegin(_etype844, _size841); - this->success.resize(_size841); - uint32_t _i845; - for (_i845 = 0; _i845 < _size841; ++_i845) + uint32_t _size851; + ::apache::thrift::protocol::TType _etype854; + xfer += iprot->readListBegin(_etype854, _size851); + this->success.resize(_size851); + uint32_t _i855; + for (_i855 = 0; _i855 < _size851; ++_i855) { - xfer += this->success[_i845].read(iprot); + xfer += this->success[_i855].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter846; - for (_iter846 = this->success.begin(); _iter846 != this->success.end(); ++_iter846) + std::vector ::const_iterator _iter856; + for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856) { - xfer += (*_iter846).write(oprot); + xfer += (*_iter856).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size847; - ::apache::thrift::protocol::TType _etype850; - xfer += iprot->readListBegin(_etype850, _size847); - (*(this->success)).resize(_size847); - uint32_t _i851; - for (_i851 = 0; _i851 < _size847; ++_i851) + 
uint32_t _size857; + ::apache::thrift::protocol::TType _etype860; + xfer += iprot->readListBegin(_etype860, _size857); + (*(this->success)).resize(_size857); + uint32_t _i861; + for (_i861 = 0; _i861 < _size857; ++_i861) { - xfer += (*(this->success))[_i851].read(iprot); + xfer += (*(this->success))[_i861].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size852; - ::apache::thrift::protocol::TType _etype855; - xfer += iprot->readListBegin(_etype855, _size852); - this->success.resize(_size852); - uint32_t _i856; - for (_i856 = 0; _i856 < _size852; ++_i856) + uint32_t _size862; + ::apache::thrift::protocol::TType _etype865; + xfer += iprot->readListBegin(_etype865, _size862); + this->success.resize(_size862); + uint32_t _i866; + for (_i866 = 0; _i866 < _size862; ++_i866) { - xfer += this->success[_i856].read(iprot); + xfer += this->success[_i866].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter857; - for (_iter857 = this->success.begin(); _iter857 != this->success.end(); ++_iter857) + std::vector ::const_iterator _iter867; + for (_iter867 = this->success.begin(); _iter867 != this->success.end(); ++_iter867) { - xfer += (*_iter857).write(oprot); + xfer += (*_iter867).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size858; - ::apache::thrift::protocol::TType _etype861; - xfer += iprot->readListBegin(_etype861, _size858); - (*(this->success)).resize(_size858); - uint32_t _i862; - for (_i862 = 0; _i862 < _size858; ++_i862) + uint32_t _size868; + ::apache::thrift::protocol::TType _etype871; + xfer += iprot->readListBegin(_etype871, _size868); + (*(this->success)).resize(_size868); + uint32_t _i872; + for (_i872 = 0; _i872 < _size868; ++_i872) { - xfer += (*(this->success))[_i862].read(iprot); + xfer += (*(this->success))[_i872].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size863; - ::apache::thrift::protocol::TType _etype866; - xfer += iprot->readListBegin(_etype866, _size863); - this->success.resize(_size863); - uint32_t _i867; - for (_i867 = 0; _i867 < _size863; ++_i867) + uint32_t _size873; + ::apache::thrift::protocol::TType _etype876; + xfer += iprot->readListBegin(_etype876, _size873); + this->success.resize(_size873); + uint32_t _i877; + for (_i877 = 0; _i877 < _size873; ++_i877) { - xfer += this->success[_i867].read(iprot); + xfer += this->success[_i877].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - 
std::vector ::const_iterator _iter868; - for (_iter868 = this->success.begin(); _iter868 != this->success.end(); ++_iter868) + std::vector ::const_iterator _iter878; + for (_iter878 = this->success.begin(); _iter878 != this->success.end(); ++_iter878) { - xfer += (*_iter868).write(oprot); + xfer += (*_iter878).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size869; - ::apache::thrift::protocol::TType _etype872; - xfer += iprot->readListBegin(_etype872, _size869); - (*(this->success)).resize(_size869); - uint32_t _i873; - for (_i873 = 0; _i873 < _size869; ++_i873) + uint32_t _size879; + ::apache::thrift::protocol::TType _etype882; + xfer += iprot->readListBegin(_etype882, _size879); + (*(this->success)).resize(_size879); + uint32_t _i883; + for (_i883 = 0; _i883 < _size879; ++_i883) { - xfer += (*(this->success))[_i873].read(iprot); + xfer += (*(this->success))[_i883].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size874; - ::apache::thrift::protocol::TType _etype877; - xfer += iprot->readListBegin(_etype877, _size874); - this->primaryKeys.resize(_size874); - uint32_t _i878; - for (_i878 = 0; _i878 < _size874; ++_i878) + uint32_t _size884; + ::apache::thrift::protocol::TType _etype887; + xfer += iprot->readListBegin(_etype887, _size884); + this->primaryKeys.resize(_size884); + uint32_t _i888; + for (_i888 = 0; _i888 < _size884; ++_i888) { - xfer += this->primaryKeys[_i878].read(iprot); + xfer += this->primaryKeys[_i888].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size879; - ::apache::thrift::protocol::TType _etype882; - xfer += iprot->readListBegin(_etype882, _size879); - this->foreignKeys.resize(_size879); - uint32_t _i883; - for (_i883 = 0; _i883 < _size879; ++_i883) + uint32_t _size889; + ::apache::thrift::protocol::TType _etype892; + xfer += iprot->readListBegin(_etype892, _size889); + this->foreignKeys.resize(_size889); + uint32_t _i893; + for (_i893 = 0; _i893 < _size889; ++_i893) { - xfer += this->foreignKeys[_i883].read(iprot); + xfer += this->foreignKeys[_i893].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter884; - for (_iter884 = this->primaryKeys.begin(); _iter884 != this->primaryKeys.end(); ++_iter884) + std::vector ::const_iterator _iter894; + for (_iter894 = this->primaryKeys.begin(); _iter894 != this->primaryKeys.end(); ++_iter894) { - xfer += (*_iter884).write(oprot); + xfer += (*_iter894).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter885; - for (_iter885 = this->foreignKeys.begin(); _iter885 != this->foreignKeys.end(); ++_iter885) + std::vector ::const_iterator _iter895; + for (_iter895 = this->foreignKeys.begin(); _iter895 != this->foreignKeys.end(); ++_iter895) { - xfer += (*_iter885).write(oprot); + xfer += (*_iter895).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter886; - for (_iter886 = (*(this->primaryKeys)).begin(); _iter886 != (*(this->primaryKeys)).end(); ++_iter886) + std::vector ::const_iterator _iter896; + for (_iter896 = (*(this->primaryKeys)).begin(); _iter896 != (*(this->primaryKeys)).end(); ++_iter896) { - xfer += (*_iter886).write(oprot); + xfer += (*_iter896).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter887; - for (_iter887 = (*(this->foreignKeys)).begin(); _iter887 != (*(this->foreignKeys)).end(); ++_iter887) + std::vector ::const_iterator _iter897; + for (_iter897 = (*(this->foreignKeys)).begin(); _iter897 != (*(this->foreignKeys)).end(); ++_iter897) { - xfer += (*_iter887).write(oprot); + xfer += (*_iter897).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6055,14 +6055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size888; - ::apache::thrift::protocol::TType _etype891; - xfer += iprot->readListBegin(_etype891, _size888); - this->success.resize(_size888); - uint32_t _i892; - for (_i892 = 0; _i892 < _size888; ++_i892) + uint32_t _size898; + ::apache::thrift::protocol::TType _etype901; + xfer += iprot->readListBegin(_etype901, _size898); + this->success.resize(_size898); + uint32_t _i902; + for (_i902 = 0; _i902 < _size898; ++_i902) { - xfer += iprot->readString(this->success[_i892]); + xfer += iprot->readString(this->success[_i902]); } xfer += iprot->readListEnd(); } @@ -6101,10 +6101,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter893; - for (_iter893 = this->success.begin(); _iter893 != this->success.end(); ++_iter893) + std::vector ::const_iterator _iter903; + for (_iter903 = this->success.begin(); _iter903 != this->success.end(); ++_iter903) { - xfer += oprot->writeString((*_iter893)); + xfer += oprot->writeString((*_iter903)); } xfer += oprot->writeListEnd(); } @@ -6149,14 +6149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size894; - 
::apache::thrift::protocol::TType _etype897; - xfer += iprot->readListBegin(_etype897, _size894); - (*(this->success)).resize(_size894); - uint32_t _i898; - for (_i898 = 0; _i898 < _size894; ++_i898) + uint32_t _size904; + ::apache::thrift::protocol::TType _etype907; + xfer += iprot->readListBegin(_etype907, _size904); + (*(this->success)).resize(_size904); + uint32_t _i908; + for (_i908 = 0; _i908 < _size904; ++_i908) { - xfer += iprot->readString((*(this->success))[_i898]); + xfer += iprot->readString((*(this->success))[_i908]); } xfer += iprot->readListEnd(); } @@ -6231,14 +6231,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size899; - ::apache::thrift::protocol::TType _etype902; - xfer += iprot->readListBegin(_etype902, _size899); - this->tbl_types.resize(_size899); - uint32_t _i903; - for (_i903 = 0; _i903 < _size899; ++_i903) + uint32_t _size909; + ::apache::thrift::protocol::TType _etype912; + xfer += iprot->readListBegin(_etype912, _size909); + this->tbl_types.resize(_size909); + uint32_t _i913; + for (_i913 = 0; _i913 < _size909; ++_i913) { - xfer += iprot->readString(this->tbl_types[_i903]); + xfer += iprot->readString(this->tbl_types[_i913]); } xfer += iprot->readListEnd(); } @@ -6275,10 +6275,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter904; - for (_iter904 = this->tbl_types.begin(); _iter904 != this->tbl_types.end(); ++_iter904) + std::vector ::const_iterator _iter914; + for (_iter914 = this->tbl_types.begin(); _iter914 != this->tbl_types.end(); ++_iter914) { - xfer += oprot->writeString((*_iter904)); + xfer += oprot->writeString((*_iter914)); } xfer += oprot->writeListEnd(); } @@ -6310,10 +6310,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter905; - for (_iter905 = (*(this->tbl_types)).begin(); _iter905 != (*(this->tbl_types)).end(); ++_iter905) + std::vector ::const_iterator _iter915; + for (_iter915 = (*(this->tbl_types)).begin(); _iter915 != (*(this->tbl_types)).end(); ++_iter915) { - xfer += oprot->writeString((*_iter905)); + xfer += oprot->writeString((*_iter915)); } xfer += oprot->writeListEnd(); } @@ -6354,14 +6354,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size906; - ::apache::thrift::protocol::TType _etype909; - xfer += iprot->readListBegin(_etype909, _size906); - this->success.resize(_size906); - uint32_t _i910; - for (_i910 = 0; _i910 < _size906; ++_i910) + uint32_t _size916; + ::apache::thrift::protocol::TType _etype919; + xfer += iprot->readListBegin(_etype919, _size916); + this->success.resize(_size916); + uint32_t _i920; + for (_i920 = 0; _i920 < _size916; ++_i920) { - xfer += this->success[_i910].read(iprot); + xfer += this->success[_i920].read(iprot); } xfer += iprot->readListEnd(); } @@ -6400,10 +6400,10 @@ uint32_t 
ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter911; - for (_iter911 = this->success.begin(); _iter911 != this->success.end(); ++_iter911) + std::vector ::const_iterator _iter921; + for (_iter921 = this->success.begin(); _iter921 != this->success.end(); ++_iter921) { - xfer += (*_iter911).write(oprot); + xfer += (*_iter921).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6448,14 +6448,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size912; - ::apache::thrift::protocol::TType _etype915; - xfer += iprot->readListBegin(_etype915, _size912); - (*(this->success)).resize(_size912); - uint32_t _i916; - for (_i916 = 0; _i916 < _size912; ++_i916) + uint32_t _size922; + ::apache::thrift::protocol::TType _etype925; + xfer += iprot->readListBegin(_etype925, _size922); + (*(this->success)).resize(_size922); + uint32_t _i926; + for (_i926 = 0; _i926 < _size922; ++_i926) { - xfer += (*(this->success))[_i916].read(iprot); + xfer += (*(this->success))[_i926].read(iprot); } xfer += iprot->readListEnd(); } @@ -6593,14 +6593,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size917; - ::apache::thrift::protocol::TType _etype920; - xfer += iprot->readListBegin(_etype920, _size917); - this->success.resize(_size917); - uint32_t _i921; - for (_i921 = 0; _i921 < _size917; ++_i921) + uint32_t _size927; + ::apache::thrift::protocol::TType _etype930; + xfer += iprot->readListBegin(_etype930, _size927); + this->success.resize(_size927); + uint32_t _i931; + for (_i931 = 0; _i931 < _size927; ++_i931) { - xfer += iprot->readString(this->success[_i921]); + xfer += iprot->readString(this->success[_i931]); } xfer += iprot->readListEnd(); } @@ -6639,10 +6639,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter922; - for (_iter922 = this->success.begin(); _iter922 != this->success.end(); ++_iter922) + std::vector ::const_iterator _iter932; + for (_iter932 = this->success.begin(); _iter932 != this->success.end(); ++_iter932) { - xfer += oprot->writeString((*_iter922)); + xfer += oprot->writeString((*_iter932)); } xfer += oprot->writeListEnd(); } @@ -6687,14 +6687,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size923; - ::apache::thrift::protocol::TType _etype926; - xfer += iprot->readListBegin(_etype926, _size923); - (*(this->success)).resize(_size923); - uint32_t _i927; - for (_i927 = 0; _i927 < _size923; ++_i927) + uint32_t _size933; + ::apache::thrift::protocol::TType _etype936; + xfer += iprot->readListBegin(_etype936, _size933); + (*(this->success)).resize(_size933); + uint32_t _i937; + for (_i937 = 0; _i937 < _size933; ++_i937) { - xfer += iprot->readString((*(this->success))[_i927]); + xfer += 
iprot->readString((*(this->success))[_i937]); } xfer += iprot->readListEnd(); } @@ -7004,14 +7004,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size928; - ::apache::thrift::protocol::TType _etype931; - xfer += iprot->readListBegin(_etype931, _size928); - this->tbl_names.resize(_size928); - uint32_t _i932; - for (_i932 = 0; _i932 < _size928; ++_i932) + uint32_t _size938; + ::apache::thrift::protocol::TType _etype941; + xfer += iprot->readListBegin(_etype941, _size938); + this->tbl_names.resize(_size938); + uint32_t _i942; + for (_i942 = 0; _i942 < _size938; ++_i942) { - xfer += iprot->readString(this->tbl_names[_i932]); + xfer += iprot->readString(this->tbl_names[_i942]); } xfer += iprot->readListEnd(); } @@ -7044,10 +7044,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter933; - for (_iter933 = this->tbl_names.begin(); _iter933 != this->tbl_names.end(); ++_iter933) + std::vector ::const_iterator _iter943; + for (_iter943 = this->tbl_names.begin(); _iter943 != this->tbl_names.end(); ++_iter943) { - xfer += oprot->writeString((*_iter933)); + xfer += oprot->writeString((*_iter943)); } xfer += oprot->writeListEnd(); } @@ -7075,10 +7075,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter934; - for (_iter934 = (*(this->tbl_names)).begin(); _iter934 != (*(this->tbl_names)).end(); ++_iter934) + std::vector ::const_iterator _iter944; + for (_iter944 = (*(this->tbl_names)).begin(); _iter944 != (*(this->tbl_names)).end(); ++_iter944) { - xfer += oprot->writeString((*_iter934)); + xfer += oprot->writeString((*_iter944)); } xfer += oprot->writeListEnd(); } @@ -7119,14 +7119,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size935; - ::apache::thrift::protocol::TType _etype938; - xfer += iprot->readListBegin(_etype938, _size935); - this->success.resize(_size935); - uint32_t _i939; - for (_i939 = 0; _i939 < _size935; ++_i939) + uint32_t _size945; + ::apache::thrift::protocol::TType _etype948; + xfer += iprot->readListBegin(_etype948, _size945); + this->success.resize(_size945); + uint32_t _i949; + for (_i949 = 0; _i949 < _size945; ++_i949) { - xfer += this->success[_i939].read(iprot); + xfer += this->success[_i949].read(iprot); } xfer += iprot->readListEnd(); } @@ -7181,10 +7181,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector
<Table> ::const_iterator _iter940; - for (_iter940 = this->success.begin(); _iter940 != this->success.end(); ++_iter940) + std::vector<Table>
::const_iterator _iter950; + for (_iter950 = this->success.begin(); _iter950 != this->success.end(); ++_iter950) { - xfer += (*_iter940).write(oprot); + xfer += (*_iter950).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7237,14 +7237,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size941; - ::apache::thrift::protocol::TType _etype944; - xfer += iprot->readListBegin(_etype944, _size941); - (*(this->success)).resize(_size941); - uint32_t _i945; - for (_i945 = 0; _i945 < _size941; ++_i945) + uint32_t _size951; + ::apache::thrift::protocol::TType _etype954; + xfer += iprot->readListBegin(_etype954, _size951); + (*(this->success)).resize(_size951); + uint32_t _i955; + for (_i955 = 0; _i955 < _size951; ++_i955) { - xfer += (*(this->success))[_i945].read(iprot); + xfer += (*(this->success))[_i955].read(iprot); } xfer += iprot->readListEnd(); } @@ -7430,14 +7430,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size946; - ::apache::thrift::protocol::TType _etype949; - xfer += iprot->readListBegin(_etype949, _size946); - this->success.resize(_size946); - uint32_t _i950; - for (_i950 = 0; _i950 < _size946; ++_i950) + uint32_t _size956; + ::apache::thrift::protocol::TType _etype959; + xfer += iprot->readListBegin(_etype959, _size956); + this->success.resize(_size956); + uint32_t _i960; + for (_i960 = 0; _i960 < _size956; ++_i960) { - xfer += iprot->readString(this->success[_i950]); + xfer += iprot->readString(this->success[_i960]); } xfer += iprot->readListEnd(); } @@ -7492,10 +7492,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter951; - for (_iter951 = this->success.begin(); _iter951 != this->success.end(); ++_iter951) + std::vector ::const_iterator _iter961; + for (_iter961 = this->success.begin(); _iter961 != this->success.end(); ++_iter961) { - xfer += oprot->writeString((*_iter951)); + xfer += oprot->writeString((*_iter961)); } xfer += oprot->writeListEnd(); } @@ -7548,14 +7548,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size952; - ::apache::thrift::protocol::TType _etype955; - xfer += iprot->readListBegin(_etype955, _size952); - (*(this->success)).resize(_size952); - uint32_t _i956; - for (_i956 = 0; _i956 < _size952; ++_i956) + uint32_t _size962; + ::apache::thrift::protocol::TType _etype965; + xfer += iprot->readListBegin(_etype965, _size962); + (*(this->success)).resize(_size962); + uint32_t _i966; + for (_i966 = 0; _i966 < _size962; ++_i966) { - xfer += iprot->readString((*(this->success))[_i956]); + xfer += iprot->readString((*(this->success))[_i966]); } xfer += iprot->readListEnd(); } @@ -8889,14 +8889,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size957; - ::apache::thrift::protocol::TType _etype960; - xfer += iprot->readListBegin(_etype960, _size957); - this->new_parts.resize(_size957); - uint32_t _i961; - 
for (_i961 = 0; _i961 < _size957; ++_i961) + uint32_t _size967; + ::apache::thrift::protocol::TType _etype970; + xfer += iprot->readListBegin(_etype970, _size967); + this->new_parts.resize(_size967); + uint32_t _i971; + for (_i971 = 0; _i971 < _size967; ++_i971) { - xfer += this->new_parts[_i961].read(iprot); + xfer += this->new_parts[_i971].read(iprot); } xfer += iprot->readListEnd(); } @@ -8925,10 +8925,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter962; - for (_iter962 = this->new_parts.begin(); _iter962 != this->new_parts.end(); ++_iter962) + std::vector ::const_iterator _iter972; + for (_iter972 = this->new_parts.begin(); _iter972 != this->new_parts.end(); ++_iter972) { - xfer += (*_iter962).write(oprot); + xfer += (*_iter972).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8952,10 +8952,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter963; - for (_iter963 = (*(this->new_parts)).begin(); _iter963 != (*(this->new_parts)).end(); ++_iter963) + std::vector ::const_iterator _iter973; + for (_iter973 = (*(this->new_parts)).begin(); _iter973 != (*(this->new_parts)).end(); ++_iter973) { - xfer += (*_iter963).write(oprot); + xfer += (*_iter973).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9164,14 +9164,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size964; - ::apache::thrift::protocol::TType _etype967; - xfer += iprot->readListBegin(_etype967, _size964); - this->new_parts.resize(_size964); - uint32_t _i968; - for (_i968 = 0; _i968 < _size964; ++_i968) + uint32_t _size974; + ::apache::thrift::protocol::TType _etype977; + xfer += iprot->readListBegin(_etype977, _size974); + this->new_parts.resize(_size974); + uint32_t _i978; + for (_i978 = 0; _i978 < _size974; ++_i978) { - xfer += this->new_parts[_i968].read(iprot); + xfer += this->new_parts[_i978].read(iprot); } xfer += iprot->readListEnd(); } @@ -9200,10 +9200,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter969; - for (_iter969 = this->new_parts.begin(); _iter969 != this->new_parts.end(); ++_iter969) + std::vector ::const_iterator _iter979; + for (_iter979 = this->new_parts.begin(); _iter979 != this->new_parts.end(); ++_iter979) { - xfer += (*_iter969).write(oprot); + xfer += (*_iter979).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9227,10 +9227,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter970; - for (_iter970 = 
(*(this->new_parts)).begin(); _iter970 != (*(this->new_parts)).end(); ++_iter970) + std::vector ::const_iterator _iter980; + for (_iter980 = (*(this->new_parts)).begin(); _iter980 != (*(this->new_parts)).end(); ++_iter980) { - xfer += (*_iter970).write(oprot); + xfer += (*_iter980).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9455,14 +9455,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size971; - ::apache::thrift::protocol::TType _etype974; - xfer += iprot->readListBegin(_etype974, _size971); - this->part_vals.resize(_size971); - uint32_t _i975; - for (_i975 = 0; _i975 < _size971; ++_i975) + uint32_t _size981; + ::apache::thrift::protocol::TType _etype984; + xfer += iprot->readListBegin(_etype984, _size981); + this->part_vals.resize(_size981); + uint32_t _i985; + for (_i985 = 0; _i985 < _size981; ++_i985) { - xfer += iprot->readString(this->part_vals[_i975]); + xfer += iprot->readString(this->part_vals[_i985]); } xfer += iprot->readListEnd(); } @@ -9499,10 +9499,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter976; - for (_iter976 = this->part_vals.begin(); _iter976 != this->part_vals.end(); ++_iter976) + std::vector ::const_iterator _iter986; + for (_iter986 = this->part_vals.begin(); _iter986 != this->part_vals.end(); ++_iter986) { - xfer += oprot->writeString((*_iter976)); + xfer += oprot->writeString((*_iter986)); } xfer += oprot->writeListEnd(); } @@ -9534,10 +9534,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter977; - for (_iter977 = (*(this->part_vals)).begin(); _iter977 != (*(this->part_vals)).end(); ++_iter977) + std::vector ::const_iterator _iter987; + for (_iter987 = (*(this->part_vals)).begin(); _iter987 != (*(this->part_vals)).end(); ++_iter987) { - xfer += oprot->writeString((*_iter977)); + xfer += oprot->writeString((*_iter987)); } xfer += oprot->writeListEnd(); } @@ -10009,14 +10009,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size978; - ::apache::thrift::protocol::TType _etype981; - xfer += iprot->readListBegin(_etype981, _size978); - this->part_vals.resize(_size978); - uint32_t _i982; - for (_i982 = 0; _i982 < _size978; ++_i982) + uint32_t _size988; + ::apache::thrift::protocol::TType _etype991; + xfer += iprot->readListBegin(_etype991, _size988); + this->part_vals.resize(_size988); + uint32_t _i992; + for (_i992 = 0; _i992 < _size988; ++_i992) { - xfer += iprot->readString(this->part_vals[_i982]); + xfer += iprot->readString(this->part_vals[_i992]); } xfer += iprot->readListEnd(); } @@ -10061,10 +10061,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - 
std::vector ::const_iterator _iter983; - for (_iter983 = this->part_vals.begin(); _iter983 != this->part_vals.end(); ++_iter983) + std::vector ::const_iterator _iter993; + for (_iter993 = this->part_vals.begin(); _iter993 != this->part_vals.end(); ++_iter993) { - xfer += oprot->writeString((*_iter983)); + xfer += oprot->writeString((*_iter993)); } xfer += oprot->writeListEnd(); } @@ -10100,10 +10100,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter984; - for (_iter984 = (*(this->part_vals)).begin(); _iter984 != (*(this->part_vals)).end(); ++_iter984) + std::vector ::const_iterator _iter994; + for (_iter994 = (*(this->part_vals)).begin(); _iter994 != (*(this->part_vals)).end(); ++_iter994) { - xfer += oprot->writeString((*_iter984)); + xfer += oprot->writeString((*_iter994)); } xfer += oprot->writeListEnd(); } @@ -10906,14 +10906,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size985; - ::apache::thrift::protocol::TType _etype988; - xfer += iprot->readListBegin(_etype988, _size985); - this->part_vals.resize(_size985); - uint32_t _i989; - for (_i989 = 0; _i989 < _size985; ++_i989) + uint32_t _size995; + ::apache::thrift::protocol::TType _etype998; + xfer += iprot->readListBegin(_etype998, _size995); + this->part_vals.resize(_size995); + uint32_t _i999; + for (_i999 = 0; _i999 < _size995; ++_i999) { - xfer += iprot->readString(this->part_vals[_i989]); + xfer += iprot->readString(this->part_vals[_i999]); } xfer += iprot->readListEnd(); } @@ -10958,10 +10958,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter990; - for (_iter990 = this->part_vals.begin(); _iter990 != this->part_vals.end(); ++_iter990) + std::vector ::const_iterator _iter1000; + for (_iter1000 = this->part_vals.begin(); _iter1000 != this->part_vals.end(); ++_iter1000) { - xfer += oprot->writeString((*_iter990)); + xfer += oprot->writeString((*_iter1000)); } xfer += oprot->writeListEnd(); } @@ -10997,10 +10997,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter991; - for (_iter991 = (*(this->part_vals)).begin(); _iter991 != (*(this->part_vals)).end(); ++_iter991) + std::vector ::const_iterator _iter1001; + for (_iter1001 = (*(this->part_vals)).begin(); _iter1001 != (*(this->part_vals)).end(); ++_iter1001) { - xfer += oprot->writeString((*_iter991)); + xfer += oprot->writeString((*_iter1001)); } xfer += oprot->writeListEnd(); } @@ -11209,14 +11209,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size992; - ::apache::thrift::protocol::TType _etype995; - xfer += 
iprot->readListBegin(_etype995, _size992); - this->part_vals.resize(_size992); - uint32_t _i996; - for (_i996 = 0; _i996 < _size992; ++_i996) + uint32_t _size1002; + ::apache::thrift::protocol::TType _etype1005; + xfer += iprot->readListBegin(_etype1005, _size1002); + this->part_vals.resize(_size1002); + uint32_t _i1006; + for (_i1006 = 0; _i1006 < _size1002; ++_i1006) { - xfer += iprot->readString(this->part_vals[_i996]); + xfer += iprot->readString(this->part_vals[_i1006]); } xfer += iprot->readListEnd(); } @@ -11269,10 +11269,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter997; - for (_iter997 = this->part_vals.begin(); _iter997 != this->part_vals.end(); ++_iter997) + std::vector ::const_iterator _iter1007; + for (_iter1007 = this->part_vals.begin(); _iter1007 != this->part_vals.end(); ++_iter1007) { - xfer += oprot->writeString((*_iter997)); + xfer += oprot->writeString((*_iter1007)); } xfer += oprot->writeListEnd(); } @@ -11312,10 +11312,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter998; - for (_iter998 = (*(this->part_vals)).begin(); _iter998 != (*(this->part_vals)).end(); ++_iter998) + std::vector ::const_iterator _iter1008; + for (_iter1008 = (*(this->part_vals)).begin(); _iter1008 != (*(this->part_vals)).end(); ++_iter1008) { - xfer += oprot->writeString((*_iter998)); + xfer += oprot->writeString((*_iter1008)); } xfer += oprot->writeListEnd(); } @@ -12321,14 +12321,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size999; - ::apache::thrift::protocol::TType _etype1002; - xfer += iprot->readListBegin(_etype1002, _size999); - this->part_vals.resize(_size999); - uint32_t _i1003; - for (_i1003 = 0; _i1003 < _size999; ++_i1003) + uint32_t _size1009; + ::apache::thrift::protocol::TType _etype1012; + xfer += iprot->readListBegin(_etype1012, _size1009); + this->part_vals.resize(_size1009); + uint32_t _i1013; + for (_i1013 = 0; _i1013 < _size1009; ++_i1013) { - xfer += iprot->readString(this->part_vals[_i1003]); + xfer += iprot->readString(this->part_vals[_i1013]); } xfer += iprot->readListEnd(); } @@ -12365,10 +12365,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1004; - for (_iter1004 = this->part_vals.begin(); _iter1004 != this->part_vals.end(); ++_iter1004) + std::vector ::const_iterator _iter1014; + for (_iter1014 = this->part_vals.begin(); _iter1014 != this->part_vals.end(); ++_iter1014) { - xfer += oprot->writeString((*_iter1004)); + xfer += oprot->writeString((*_iter1014)); } xfer += oprot->writeListEnd(); } @@ -12400,10 +12400,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1005; - for (_iter1005 = (*(this->part_vals)).begin(); _iter1005 != (*(this->part_vals)).end(); ++_iter1005) + std::vector ::const_iterator _iter1015; + for (_iter1015 = (*(this->part_vals)).begin(); _iter1015 != (*(this->part_vals)).end(); ++_iter1015) { - xfer += oprot->writeString((*_iter1005)); + xfer += oprot->writeString((*_iter1015)); } xfer += oprot->writeListEnd(); } @@ -12592,17 +12592,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1006; - ::apache::thrift::protocol::TType _ktype1007; - ::apache::thrift::protocol::TType _vtype1008; - xfer += iprot->readMapBegin(_ktype1007, _vtype1008, _size1006); - uint32_t _i1010; - for (_i1010 = 0; _i1010 < _size1006; ++_i1010) + uint32_t _size1016; + ::apache::thrift::protocol::TType _ktype1017; + ::apache::thrift::protocol::TType _vtype1018; + xfer += iprot->readMapBegin(_ktype1017, _vtype1018, _size1016); + uint32_t _i1020; + for (_i1020 = 0; _i1020 < _size1016; ++_i1020) { - std::string _key1011; - xfer += iprot->readString(_key1011); - std::string& _val1012 = this->partitionSpecs[_key1011]; - xfer += iprot->readString(_val1012); + std::string _key1021; + xfer += iprot->readString(_key1021); + std::string& _val1022 = this->partitionSpecs[_key1021]; + xfer += iprot->readString(_val1022); } xfer += iprot->readMapEnd(); } @@ -12663,11 +12663,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1013; - for (_iter1013 = this->partitionSpecs.begin(); _iter1013 != this->partitionSpecs.end(); ++_iter1013) + std::map ::const_iterator _iter1023; + for (_iter1023 = this->partitionSpecs.begin(); _iter1023 != this->partitionSpecs.end(); ++_iter1023) { - xfer += oprot->writeString(_iter1013->first); - xfer += oprot->writeString(_iter1013->second); + xfer += oprot->writeString(_iter1023->first); + xfer += oprot->writeString(_iter1023->second); } xfer += oprot->writeMapEnd(); } @@ -12707,11 +12707,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1014; - for (_iter1014 = (*(this->partitionSpecs)).begin(); _iter1014 != (*(this->partitionSpecs)).end(); ++_iter1014) + std::map ::const_iterator _iter1024; + for (_iter1024 = (*(this->partitionSpecs)).begin(); _iter1024 != (*(this->partitionSpecs)).end(); ++_iter1024) { - xfer += oprot->writeString(_iter1014->first); - xfer += oprot->writeString(_iter1014->second); + xfer += oprot->writeString(_iter1024->first); + xfer += oprot->writeString(_iter1024->second); } xfer += oprot->writeMapEnd(); } @@ -12956,17 +12956,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - 
uint32_t _size1015; - ::apache::thrift::protocol::TType _ktype1016; - ::apache::thrift::protocol::TType _vtype1017; - xfer += iprot->readMapBegin(_ktype1016, _vtype1017, _size1015); - uint32_t _i1019; - for (_i1019 = 0; _i1019 < _size1015; ++_i1019) + uint32_t _size1025; + ::apache::thrift::protocol::TType _ktype1026; + ::apache::thrift::protocol::TType _vtype1027; + xfer += iprot->readMapBegin(_ktype1026, _vtype1027, _size1025); + uint32_t _i1029; + for (_i1029 = 0; _i1029 < _size1025; ++_i1029) { - std::string _key1020; - xfer += iprot->readString(_key1020); - std::string& _val1021 = this->partitionSpecs[_key1020]; - xfer += iprot->readString(_val1021); + std::string _key1030; + xfer += iprot->readString(_key1030); + std::string& _val1031 = this->partitionSpecs[_key1030]; + xfer += iprot->readString(_val1031); } xfer += iprot->readMapEnd(); } @@ -13027,11 +13027,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1022; - for (_iter1022 = this->partitionSpecs.begin(); _iter1022 != this->partitionSpecs.end(); ++_iter1022) + std::map ::const_iterator _iter1032; + for (_iter1032 = this->partitionSpecs.begin(); _iter1032 != this->partitionSpecs.end(); ++_iter1032) { - xfer += oprot->writeString(_iter1022->first); - xfer += oprot->writeString(_iter1022->second); + xfer += oprot->writeString(_iter1032->first); + xfer += oprot->writeString(_iter1032->second); } xfer += oprot->writeMapEnd(); } @@ -13071,11 +13071,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1023; - for (_iter1023 = (*(this->partitionSpecs)).begin(); _iter1023 != (*(this->partitionSpecs)).end(); ++_iter1023) + std::map ::const_iterator _iter1033; + for (_iter1033 = (*(this->partitionSpecs)).begin(); _iter1033 != (*(this->partitionSpecs)).end(); ++_iter1033) { - xfer += oprot->writeString(_iter1023->first); - xfer += oprot->writeString(_iter1023->second); + xfer += oprot->writeString(_iter1033->first); + xfer += oprot->writeString(_iter1033->second); } xfer += oprot->writeMapEnd(); } @@ -13132,14 +13132,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1024; - ::apache::thrift::protocol::TType _etype1027; - xfer += iprot->readListBegin(_etype1027, _size1024); - this->success.resize(_size1024); - uint32_t _i1028; - for (_i1028 = 0; _i1028 < _size1024; ++_i1028) + uint32_t _size1034; + ::apache::thrift::protocol::TType _etype1037; + xfer += iprot->readListBegin(_etype1037, _size1034); + this->success.resize(_size1034); + uint32_t _i1038; + for (_i1038 = 0; _i1038 < _size1034; ++_i1038) { - xfer += this->success[_i1028].read(iprot); + xfer += this->success[_i1038].read(iprot); } xfer += iprot->readListEnd(); } @@ -13202,10 +13202,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); 
{ xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1029; - for (_iter1029 = this->success.begin(); _iter1029 != this->success.end(); ++_iter1029) + std::vector ::const_iterator _iter1039; + for (_iter1039 = this->success.begin(); _iter1039 != this->success.end(); ++_iter1039) { - xfer += (*_iter1029).write(oprot); + xfer += (*_iter1039).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13262,14 +13262,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1030; - ::apache::thrift::protocol::TType _etype1033; - xfer += iprot->readListBegin(_etype1033, _size1030); - (*(this->success)).resize(_size1030); - uint32_t _i1034; - for (_i1034 = 0; _i1034 < _size1030; ++_i1034) + uint32_t _size1040; + ::apache::thrift::protocol::TType _etype1043; + xfer += iprot->readListBegin(_etype1043, _size1040); + (*(this->success)).resize(_size1040); + uint32_t _i1044; + for (_i1044 = 0; _i1044 < _size1040; ++_i1044) { - xfer += (*(this->success))[_i1034].read(iprot); + xfer += (*(this->success))[_i1044].read(iprot); } xfer += iprot->readListEnd(); } @@ -13368,14 +13368,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1035; - ::apache::thrift::protocol::TType _etype1038; - xfer += iprot->readListBegin(_etype1038, _size1035); - this->part_vals.resize(_size1035); - uint32_t _i1039; - for (_i1039 = 0; _i1039 < _size1035; ++_i1039) + uint32_t _size1045; + ::apache::thrift::protocol::TType _etype1048; + xfer += iprot->readListBegin(_etype1048, _size1045); + this->part_vals.resize(_size1045); + uint32_t _i1049; + for (_i1049 = 0; _i1049 < _size1045; ++_i1049) { - xfer += iprot->readString(this->part_vals[_i1039]); + xfer += iprot->readString(this->part_vals[_i1049]); } xfer += iprot->readListEnd(); } @@ -13396,14 +13396,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1040; - ::apache::thrift::protocol::TType _etype1043; - xfer += iprot->readListBegin(_etype1043, _size1040); - this->group_names.resize(_size1040); - uint32_t _i1044; - for (_i1044 = 0; _i1044 < _size1040; ++_i1044) + uint32_t _size1050; + ::apache::thrift::protocol::TType _etype1053; + xfer += iprot->readListBegin(_etype1053, _size1050); + this->group_names.resize(_size1050); + uint32_t _i1054; + for (_i1054 = 0; _i1054 < _size1050; ++_i1054) { - xfer += iprot->readString(this->group_names[_i1044]); + xfer += iprot->readString(this->group_names[_i1054]); } xfer += iprot->readListEnd(); } @@ -13440,10 +13440,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1045; - for (_iter1045 = this->part_vals.begin(); _iter1045 != this->part_vals.end(); ++_iter1045) + std::vector ::const_iterator _iter1055; + for (_iter1055 = this->part_vals.begin(); _iter1055 != this->part_vals.end(); ++_iter1055) { - xfer += oprot->writeString((*_iter1045)); + xfer += oprot->writeString((*_iter1055)); } xfer += oprot->writeListEnd(); } @@ 
-13456,10 +13456,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1046; - for (_iter1046 = this->group_names.begin(); _iter1046 != this->group_names.end(); ++_iter1046) + std::vector ::const_iterator _iter1056; + for (_iter1056 = this->group_names.begin(); _iter1056 != this->group_names.end(); ++_iter1056) { - xfer += oprot->writeString((*_iter1046)); + xfer += oprot->writeString((*_iter1056)); } xfer += oprot->writeListEnd(); } @@ -13491,10 +13491,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1047; - for (_iter1047 = (*(this->part_vals)).begin(); _iter1047 != (*(this->part_vals)).end(); ++_iter1047) + std::vector ::const_iterator _iter1057; + for (_iter1057 = (*(this->part_vals)).begin(); _iter1057 != (*(this->part_vals)).end(); ++_iter1057) { - xfer += oprot->writeString((*_iter1047)); + xfer += oprot->writeString((*_iter1057)); } xfer += oprot->writeListEnd(); } @@ -13507,10 +13507,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1048; - for (_iter1048 = (*(this->group_names)).begin(); _iter1048 != (*(this->group_names)).end(); ++_iter1048) + std::vector ::const_iterator _iter1058; + for (_iter1058 = (*(this->group_names)).begin(); _iter1058 != (*(this->group_names)).end(); ++_iter1058) { - xfer += oprot->writeString((*_iter1048)); + xfer += oprot->writeString((*_iter1058)); } xfer += oprot->writeListEnd(); } @@ -14069,14 +14069,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1049; - ::apache::thrift::protocol::TType _etype1052; - xfer += iprot->readListBegin(_etype1052, _size1049); - this->success.resize(_size1049); - uint32_t _i1053; - for (_i1053 = 0; _i1053 < _size1049; ++_i1053) + uint32_t _size1059; + ::apache::thrift::protocol::TType _etype1062; + xfer += iprot->readListBegin(_etype1062, _size1059); + this->success.resize(_size1059); + uint32_t _i1063; + for (_i1063 = 0; _i1063 < _size1059; ++_i1063) { - xfer += this->success[_i1053].read(iprot); + xfer += this->success[_i1063].read(iprot); } xfer += iprot->readListEnd(); } @@ -14123,10 +14123,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1054; - for (_iter1054 = this->success.begin(); _iter1054 != this->success.end(); ++_iter1054) + std::vector ::const_iterator _iter1064; + for (_iter1064 = this->success.begin(); _iter1064 != this->success.end(); ++_iter1064) { - xfer += (*_iter1054).write(oprot); + xfer += (*_iter1064).write(oprot); } 
xfer += oprot->writeListEnd(); } @@ -14175,14 +14175,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1055; - ::apache::thrift::protocol::TType _etype1058; - xfer += iprot->readListBegin(_etype1058, _size1055); - (*(this->success)).resize(_size1055); - uint32_t _i1059; - for (_i1059 = 0; _i1059 < _size1055; ++_i1059) + uint32_t _size1065; + ::apache::thrift::protocol::TType _etype1068; + xfer += iprot->readListBegin(_etype1068, _size1065); + (*(this->success)).resize(_size1065); + uint32_t _i1069; + for (_i1069 = 0; _i1069 < _size1065; ++_i1069) { - xfer += (*(this->success))[_i1059].read(iprot); + xfer += (*(this->success))[_i1069].read(iprot); } xfer += iprot->readListEnd(); } @@ -14281,14 +14281,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1060; - ::apache::thrift::protocol::TType _etype1063; - xfer += iprot->readListBegin(_etype1063, _size1060); - this->group_names.resize(_size1060); - uint32_t _i1064; - for (_i1064 = 0; _i1064 < _size1060; ++_i1064) + uint32_t _size1070; + ::apache::thrift::protocol::TType _etype1073; + xfer += iprot->readListBegin(_etype1073, _size1070); + this->group_names.resize(_size1070); + uint32_t _i1074; + for (_i1074 = 0; _i1074 < _size1070; ++_i1074) { - xfer += iprot->readString(this->group_names[_i1064]); + xfer += iprot->readString(this->group_names[_i1074]); } xfer += iprot->readListEnd(); } @@ -14333,10 +14333,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1065; - for (_iter1065 = this->group_names.begin(); _iter1065 != this->group_names.end(); ++_iter1065) + std::vector ::const_iterator _iter1075; + for (_iter1075 = this->group_names.begin(); _iter1075 != this->group_names.end(); ++_iter1075) { - xfer += oprot->writeString((*_iter1065)); + xfer += oprot->writeString((*_iter1075)); } xfer += oprot->writeListEnd(); } @@ -14376,10 +14376,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1066; - for (_iter1066 = (*(this->group_names)).begin(); _iter1066 != (*(this->group_names)).end(); ++_iter1066) + std::vector ::const_iterator _iter1076; + for (_iter1076 = (*(this->group_names)).begin(); _iter1076 != (*(this->group_names)).end(); ++_iter1076) { - xfer += oprot->writeString((*_iter1066)); + xfer += oprot->writeString((*_iter1076)); } xfer += oprot->writeListEnd(); } @@ -14420,14 +14420,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1067; - ::apache::thrift::protocol::TType _etype1070; - xfer += iprot->readListBegin(_etype1070, _size1067); - this->success.resize(_size1067); - uint32_t _i1071; - for (_i1071 = 0; _i1071 < _size1067; ++_i1071) + uint32_t _size1077; + ::apache::thrift::protocol::TType _etype1080; + xfer += 
iprot->readListBegin(_etype1080, _size1077); + this->success.resize(_size1077); + uint32_t _i1081; + for (_i1081 = 0; _i1081 < _size1077; ++_i1081) { - xfer += this->success[_i1071].read(iprot); + xfer += this->success[_i1081].read(iprot); } xfer += iprot->readListEnd(); } @@ -14474,10 +14474,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1072; - for (_iter1072 = this->success.begin(); _iter1072 != this->success.end(); ++_iter1072) + std::vector ::const_iterator _iter1082; + for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082) { - xfer += (*_iter1072).write(oprot); + xfer += (*_iter1082).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14526,14 +14526,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1073; - ::apache::thrift::protocol::TType _etype1076; - xfer += iprot->readListBegin(_etype1076, _size1073); - (*(this->success)).resize(_size1073); - uint32_t _i1077; - for (_i1077 = 0; _i1077 < _size1073; ++_i1077) + uint32_t _size1083; + ::apache::thrift::protocol::TType _etype1086; + xfer += iprot->readListBegin(_etype1086, _size1083); + (*(this->success)).resize(_size1083); + uint32_t _i1087; + for (_i1087 = 0; _i1087 < _size1083; ++_i1087) { - xfer += (*(this->success))[_i1077].read(iprot); + xfer += (*(this->success))[_i1087].read(iprot); } xfer += iprot->readListEnd(); } @@ -14711,14 +14711,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1078; - ::apache::thrift::protocol::TType _etype1081; - xfer += iprot->readListBegin(_etype1081, _size1078); - this->success.resize(_size1078); - uint32_t _i1082; - for (_i1082 = 0; _i1082 < _size1078; ++_i1082) + uint32_t _size1088; + ::apache::thrift::protocol::TType _etype1091; + xfer += iprot->readListBegin(_etype1091, _size1088); + this->success.resize(_size1088); + uint32_t _i1092; + for (_i1092 = 0; _i1092 < _size1088; ++_i1092) { - xfer += this->success[_i1082].read(iprot); + xfer += this->success[_i1092].read(iprot); } xfer += iprot->readListEnd(); } @@ -14765,10 +14765,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1083; - for (_iter1083 = this->success.begin(); _iter1083 != this->success.end(); ++_iter1083) + std::vector ::const_iterator _iter1093; + for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093) { - xfer += (*_iter1083).write(oprot); + xfer += (*_iter1093).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14817,14 +14817,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1084; - ::apache::thrift::protocol::TType _etype1087; - xfer += iprot->readListBegin(_etype1087, _size1084); - (*(this->success)).resize(_size1084); - uint32_t _i1088; - for (_i1088 = 
0; _i1088 < _size1084; ++_i1088) + uint32_t _size1094; + ::apache::thrift::protocol::TType _etype1097; + xfer += iprot->readListBegin(_etype1097, _size1094); + (*(this->success)).resize(_size1094); + uint32_t _i1098; + for (_i1098 = 0; _i1098 < _size1094; ++_i1098) { - xfer += (*(this->success))[_i1088].read(iprot); + xfer += (*(this->success))[_i1098].read(iprot); } xfer += iprot->readListEnd(); } @@ -15002,14 +15002,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1089; - ::apache::thrift::protocol::TType _etype1092; - xfer += iprot->readListBegin(_etype1092, _size1089); - this->success.resize(_size1089); - uint32_t _i1093; - for (_i1093 = 0; _i1093 < _size1089; ++_i1093) + uint32_t _size1099; + ::apache::thrift::protocol::TType _etype1102; + xfer += iprot->readListBegin(_etype1102, _size1099); + this->success.resize(_size1099); + uint32_t _i1103; + for (_i1103 = 0; _i1103 < _size1099; ++_i1103) { - xfer += iprot->readString(this->success[_i1093]); + xfer += iprot->readString(this->success[_i1103]); } xfer += iprot->readListEnd(); } @@ -15048,10 +15048,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1094; - for (_iter1094 = this->success.begin(); _iter1094 != this->success.end(); ++_iter1094) + std::vector ::const_iterator _iter1104; + for (_iter1104 = this->success.begin(); _iter1104 != this->success.end(); ++_iter1104) { - xfer += oprot->writeString((*_iter1094)); + xfer += oprot->writeString((*_iter1104)); } xfer += oprot->writeListEnd(); } @@ -15096,14 +15096,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1095; - ::apache::thrift::protocol::TType _etype1098; - xfer += iprot->readListBegin(_etype1098, _size1095); - (*(this->success)).resize(_size1095); - uint32_t _i1099; - for (_i1099 = 0; _i1099 < _size1095; ++_i1099) + uint32_t _size1105; + ::apache::thrift::protocol::TType _etype1108; + xfer += iprot->readListBegin(_etype1108, _size1105); + (*(this->success)).resize(_size1105); + uint32_t _i1109; + for (_i1109 = 0; _i1109 < _size1105; ++_i1109) { - xfer += iprot->readString((*(this->success))[_i1099]); + xfer += iprot->readString((*(this->success))[_i1109]); } xfer += iprot->readListEnd(); } @@ -15178,14 +15178,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1100; - ::apache::thrift::protocol::TType _etype1103; - xfer += iprot->readListBegin(_etype1103, _size1100); - this->part_vals.resize(_size1100); - uint32_t _i1104; - for (_i1104 = 0; _i1104 < _size1100; ++_i1104) + uint32_t _size1110; + ::apache::thrift::protocol::TType _etype1113; + xfer += iprot->readListBegin(_etype1113, _size1110); + this->part_vals.resize(_size1110); + uint32_t _i1114; + for (_i1114 = 0; _i1114 < _size1110; ++_i1114) { - xfer += iprot->readString(this->part_vals[_i1104]); + xfer += iprot->readString(this->part_vals[_i1114]); } xfer += iprot->readListEnd(); } @@ -15230,10 +15230,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1105; - for (_iter1105 = this->part_vals.begin(); _iter1105 != this->part_vals.end(); ++_iter1105) + std::vector ::const_iterator _iter1115; + for (_iter1115 = this->part_vals.begin(); _iter1115 != this->part_vals.end(); ++_iter1115) { - xfer += oprot->writeString((*_iter1105)); + xfer += oprot->writeString((*_iter1115)); } xfer += oprot->writeListEnd(); } @@ -15269,10 +15269,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1106; - for (_iter1106 = (*(this->part_vals)).begin(); _iter1106 != (*(this->part_vals)).end(); ++_iter1106) + std::vector ::const_iterator _iter1116; + for (_iter1116 = (*(this->part_vals)).begin(); _iter1116 != (*(this->part_vals)).end(); ++_iter1116) { - xfer += oprot->writeString((*_iter1106)); + xfer += oprot->writeString((*_iter1116)); } xfer += oprot->writeListEnd(); } @@ -15317,14 +15317,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1107; - ::apache::thrift::protocol::TType _etype1110; - xfer += iprot->readListBegin(_etype1110, _size1107); - this->success.resize(_size1107); - uint32_t _i1111; - for (_i1111 = 0; _i1111 < _size1107; ++_i1111) + uint32_t _size1117; + ::apache::thrift::protocol::TType _etype1120; + xfer += iprot->readListBegin(_etype1120, _size1117); + this->success.resize(_size1117); + uint32_t _i1121; + for (_i1121 = 0; _i1121 < _size1117; ++_i1121) { - xfer += this->success[_i1111].read(iprot); + xfer += this->success[_i1121].read(iprot); } xfer += iprot->readListEnd(); } @@ -15371,10 +15371,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1112; - for (_iter1112 = this->success.begin(); _iter1112 != this->success.end(); ++_iter1112) + std::vector ::const_iterator _iter1122; + for (_iter1122 = this->success.begin(); _iter1122 != this->success.end(); ++_iter1122) { - xfer += (*_iter1112).write(oprot); + xfer += (*_iter1122).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15423,14 +15423,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1113; - ::apache::thrift::protocol::TType _etype1116; - xfer += iprot->readListBegin(_etype1116, _size1113); - (*(this->success)).resize(_size1113); - uint32_t _i1117; - for (_i1117 = 0; _i1117 < _size1113; ++_i1117) + uint32_t _size1123; + ::apache::thrift::protocol::TType _etype1126; + xfer += iprot->readListBegin(_etype1126, _size1123); + (*(this->success)).resize(_size1123); + uint32_t _i1127; + for (_i1127 = 0; _i1127 < _size1123; ++_i1127) { - xfer += (*(this->success))[_i1117].read(iprot); + xfer += 
(*(this->success))[_i1127].read(iprot); } xfer += iprot->readListEnd(); } @@ -15513,14 +15513,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1118; - ::apache::thrift::protocol::TType _etype1121; - xfer += iprot->readListBegin(_etype1121, _size1118); - this->part_vals.resize(_size1118); - uint32_t _i1122; - for (_i1122 = 0; _i1122 < _size1118; ++_i1122) + uint32_t _size1128; + ::apache::thrift::protocol::TType _etype1131; + xfer += iprot->readListBegin(_etype1131, _size1128); + this->part_vals.resize(_size1128); + uint32_t _i1132; + for (_i1132 = 0; _i1132 < _size1128; ++_i1132) { - xfer += iprot->readString(this->part_vals[_i1122]); + xfer += iprot->readString(this->part_vals[_i1132]); } xfer += iprot->readListEnd(); } @@ -15549,14 +15549,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1123; - ::apache::thrift::protocol::TType _etype1126; - xfer += iprot->readListBegin(_etype1126, _size1123); - this->group_names.resize(_size1123); - uint32_t _i1127; - for (_i1127 = 0; _i1127 < _size1123; ++_i1127) + uint32_t _size1133; + ::apache::thrift::protocol::TType _etype1136; + xfer += iprot->readListBegin(_etype1136, _size1133); + this->group_names.resize(_size1133); + uint32_t _i1137; + for (_i1137 = 0; _i1137 < _size1133; ++_i1137) { - xfer += iprot->readString(this->group_names[_i1127]); + xfer += iprot->readString(this->group_names[_i1137]); } xfer += iprot->readListEnd(); } @@ -15593,10 +15593,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1128; - for (_iter1128 = this->part_vals.begin(); _iter1128 != this->part_vals.end(); ++_iter1128) + std::vector ::const_iterator _iter1138; + for (_iter1138 = this->part_vals.begin(); _iter1138 != this->part_vals.end(); ++_iter1138) { - xfer += oprot->writeString((*_iter1128)); + xfer += oprot->writeString((*_iter1138)); } xfer += oprot->writeListEnd(); } @@ -15613,10 +15613,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1129; - for (_iter1129 = this->group_names.begin(); _iter1129 != this->group_names.end(); ++_iter1129) + std::vector ::const_iterator _iter1139; + for (_iter1139 = this->group_names.begin(); _iter1139 != this->group_names.end(); ++_iter1139) { - xfer += oprot->writeString((*_iter1129)); + xfer += oprot->writeString((*_iter1139)); } xfer += oprot->writeListEnd(); } @@ -15648,10 +15648,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1130; - for (_iter1130 = (*(this->part_vals)).begin(); _iter1130 != (*(this->part_vals)).end(); ++_iter1130) + std::vector ::const_iterator _iter1140; + 
for (_iter1140 = (*(this->part_vals)).begin(); _iter1140 != (*(this->part_vals)).end(); ++_iter1140) { - xfer += oprot->writeString((*_iter1130)); + xfer += oprot->writeString((*_iter1140)); } xfer += oprot->writeListEnd(); } @@ -15668,10 +15668,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1131; - for (_iter1131 = (*(this->group_names)).begin(); _iter1131 != (*(this->group_names)).end(); ++_iter1131) + std::vector ::const_iterator _iter1141; + for (_iter1141 = (*(this->group_names)).begin(); _iter1141 != (*(this->group_names)).end(); ++_iter1141) { - xfer += oprot->writeString((*_iter1131)); + xfer += oprot->writeString((*_iter1141)); } xfer += oprot->writeListEnd(); } @@ -15712,14 +15712,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1132; - ::apache::thrift::protocol::TType _etype1135; - xfer += iprot->readListBegin(_etype1135, _size1132); - this->success.resize(_size1132); - uint32_t _i1136; - for (_i1136 = 0; _i1136 < _size1132; ++_i1136) + uint32_t _size1142; + ::apache::thrift::protocol::TType _etype1145; + xfer += iprot->readListBegin(_etype1145, _size1142); + this->success.resize(_size1142); + uint32_t _i1146; + for (_i1146 = 0; _i1146 < _size1142; ++_i1146) { - xfer += this->success[_i1136].read(iprot); + xfer += this->success[_i1146].read(iprot); } xfer += iprot->readListEnd(); } @@ -15766,10 +15766,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1137; - for (_iter1137 = this->success.begin(); _iter1137 != this->success.end(); ++_iter1137) + std::vector ::const_iterator _iter1147; + for (_iter1147 = this->success.begin(); _iter1147 != this->success.end(); ++_iter1147) { - xfer += (*_iter1137).write(oprot); + xfer += (*_iter1147).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15818,14 +15818,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1138; - ::apache::thrift::protocol::TType _etype1141; - xfer += iprot->readListBegin(_etype1141, _size1138); - (*(this->success)).resize(_size1138); - uint32_t _i1142; - for (_i1142 = 0; _i1142 < _size1138; ++_i1142) + uint32_t _size1148; + ::apache::thrift::protocol::TType _etype1151; + xfer += iprot->readListBegin(_etype1151, _size1148); + (*(this->success)).resize(_size1148); + uint32_t _i1152; + for (_i1152 = 0; _i1152 < _size1148; ++_i1152) { - xfer += (*(this->success))[_i1142].read(iprot); + xfer += (*(this->success))[_i1152].read(iprot); } xfer += iprot->readListEnd(); } @@ -15908,14 +15908,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1143; - ::apache::thrift::protocol::TType _etype1146; - xfer += iprot->readListBegin(_etype1146, _size1143); - this->part_vals.resize(_size1143); - uint32_t _i1147; - for 
(_i1147 = 0; _i1147 < _size1143; ++_i1147) + uint32_t _size1153; + ::apache::thrift::protocol::TType _etype1156; + xfer += iprot->readListBegin(_etype1156, _size1153); + this->part_vals.resize(_size1153); + uint32_t _i1157; + for (_i1157 = 0; _i1157 < _size1153; ++_i1157) { - xfer += iprot->readString(this->part_vals[_i1147]); + xfer += iprot->readString(this->part_vals[_i1157]); } xfer += iprot->readListEnd(); } @@ -15960,10 +15960,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1148; - for (_iter1148 = this->part_vals.begin(); _iter1148 != this->part_vals.end(); ++_iter1148) + std::vector ::const_iterator _iter1158; + for (_iter1158 = this->part_vals.begin(); _iter1158 != this->part_vals.end(); ++_iter1158) { - xfer += oprot->writeString((*_iter1148)); + xfer += oprot->writeString((*_iter1158)); } xfer += oprot->writeListEnd(); } @@ -15999,10 +15999,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1149; - for (_iter1149 = (*(this->part_vals)).begin(); _iter1149 != (*(this->part_vals)).end(); ++_iter1149) + std::vector ::const_iterator _iter1159; + for (_iter1159 = (*(this->part_vals)).begin(); _iter1159 != (*(this->part_vals)).end(); ++_iter1159) { - xfer += oprot->writeString((*_iter1149)); + xfer += oprot->writeString((*_iter1159)); } xfer += oprot->writeListEnd(); } @@ -16047,14 +16047,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1150; - ::apache::thrift::protocol::TType _etype1153; - xfer += iprot->readListBegin(_etype1153, _size1150); - this->success.resize(_size1150); - uint32_t _i1154; - for (_i1154 = 0; _i1154 < _size1150; ++_i1154) + uint32_t _size1160; + ::apache::thrift::protocol::TType _etype1163; + xfer += iprot->readListBegin(_etype1163, _size1160); + this->success.resize(_size1160); + uint32_t _i1164; + for (_i1164 = 0; _i1164 < _size1160; ++_i1164) { - xfer += iprot->readString(this->success[_i1154]); + xfer += iprot->readString(this->success[_i1164]); } xfer += iprot->readListEnd(); } @@ -16101,10 +16101,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1155; - for (_iter1155 = this->success.begin(); _iter1155 != this->success.end(); ++_iter1155) + std::vector ::const_iterator _iter1165; + for (_iter1165 = this->success.begin(); _iter1165 != this->success.end(); ++_iter1165) { - xfer += oprot->writeString((*_iter1155)); + xfer += oprot->writeString((*_iter1165)); } xfer += oprot->writeListEnd(); } @@ -16153,14 +16153,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1156; - ::apache::thrift::protocol::TType _etype1159; - 
xfer += iprot->readListBegin(_etype1159, _size1156); - (*(this->success)).resize(_size1156); - uint32_t _i1160; - for (_i1160 = 0; _i1160 < _size1156; ++_i1160) + uint32_t _size1166; + ::apache::thrift::protocol::TType _etype1169; + xfer += iprot->readListBegin(_etype1169, _size1166); + (*(this->success)).resize(_size1166); + uint32_t _i1170; + for (_i1170 = 0; _i1170 < _size1166; ++_i1170) { - xfer += iprot->readString((*(this->success))[_i1160]); + xfer += iprot->readString((*(this->success))[_i1170]); } xfer += iprot->readListEnd(); } @@ -16354,14 +16354,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1161; - ::apache::thrift::protocol::TType _etype1164; - xfer += iprot->readListBegin(_etype1164, _size1161); - this->success.resize(_size1161); - uint32_t _i1165; - for (_i1165 = 0; _i1165 < _size1161; ++_i1165) + uint32_t _size1171; + ::apache::thrift::protocol::TType _etype1174; + xfer += iprot->readListBegin(_etype1174, _size1171); + this->success.resize(_size1171); + uint32_t _i1175; + for (_i1175 = 0; _i1175 < _size1171; ++_i1175) { - xfer += this->success[_i1165].read(iprot); + xfer += this->success[_i1175].read(iprot); } xfer += iprot->readListEnd(); } @@ -16408,10 +16408,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1166; - for (_iter1166 = this->success.begin(); _iter1166 != this->success.end(); ++_iter1166) + std::vector ::const_iterator _iter1176; + for (_iter1176 = this->success.begin(); _iter1176 != this->success.end(); ++_iter1176) { - xfer += (*_iter1166).write(oprot); + xfer += (*_iter1176).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16460,14 +16460,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1167; - ::apache::thrift::protocol::TType _etype1170; - xfer += iprot->readListBegin(_etype1170, _size1167); - (*(this->success)).resize(_size1167); - uint32_t _i1171; - for (_i1171 = 0; _i1171 < _size1167; ++_i1171) + uint32_t _size1177; + ::apache::thrift::protocol::TType _etype1180; + xfer += iprot->readListBegin(_etype1180, _size1177); + (*(this->success)).resize(_size1177); + uint32_t _i1181; + for (_i1181 = 0; _i1181 < _size1177; ++_i1181) { - xfer += (*(this->success))[_i1171].read(iprot); + xfer += (*(this->success))[_i1181].read(iprot); } xfer += iprot->readListEnd(); } @@ -16661,14 +16661,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1172; - ::apache::thrift::protocol::TType _etype1175; - xfer += iprot->readListBegin(_etype1175, _size1172); - this->success.resize(_size1172); - uint32_t _i1176; - for (_i1176 = 0; _i1176 < _size1172; ++_i1176) + uint32_t _size1182; + ::apache::thrift::protocol::TType _etype1185; + xfer += iprot->readListBegin(_etype1185, _size1182); + this->success.resize(_size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < _size1182; ++_i1186) { - xfer += this->success[_i1176].read(iprot); + xfer += this->success[_i1186].read(iprot); } xfer += iprot->readListEnd(); } @@ -16715,10 +16715,10 
@@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1177; - for (_iter1177 = this->success.begin(); _iter1177 != this->success.end(); ++_iter1177) + std::vector ::const_iterator _iter1187; + for (_iter1187 = this->success.begin(); _iter1187 != this->success.end(); ++_iter1187) { - xfer += (*_iter1177).write(oprot); + xfer += (*_iter1187).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16767,14 +16767,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1178; - ::apache::thrift::protocol::TType _etype1181; - xfer += iprot->readListBegin(_etype1181, _size1178); - (*(this->success)).resize(_size1178); - uint32_t _i1182; - for (_i1182 = 0; _i1182 < _size1178; ++_i1182) + uint32_t _size1188; + ::apache::thrift::protocol::TType _etype1191; + xfer += iprot->readListBegin(_etype1191, _size1188); + (*(this->success)).resize(_size1188); + uint32_t _i1192; + for (_i1192 = 0; _i1192 < _size1188; ++_i1192) { - xfer += (*(this->success))[_i1182].read(iprot); + xfer += (*(this->success))[_i1192].read(iprot); } xfer += iprot->readListEnd(); } @@ -17343,14 +17343,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1183; - ::apache::thrift::protocol::TType _etype1186; - xfer += iprot->readListBegin(_etype1186, _size1183); - this->names.resize(_size1183); - uint32_t _i1187; - for (_i1187 = 0; _i1187 < _size1183; ++_i1187) + uint32_t _size1193; + ::apache::thrift::protocol::TType _etype1196; + xfer += iprot->readListBegin(_etype1196, _size1193); + this->names.resize(_size1193); + uint32_t _i1197; + for (_i1197 = 0; _i1197 < _size1193; ++_i1197) { - xfer += iprot->readString(this->names[_i1187]); + xfer += iprot->readString(this->names[_i1197]); } xfer += iprot->readListEnd(); } @@ -17387,10 +17387,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1188; - for (_iter1188 = this->names.begin(); _iter1188 != this->names.end(); ++_iter1188) + std::vector ::const_iterator _iter1198; + for (_iter1198 = this->names.begin(); _iter1198 != this->names.end(); ++_iter1198) { - xfer += oprot->writeString((*_iter1188)); + xfer += oprot->writeString((*_iter1198)); } xfer += oprot->writeListEnd(); } @@ -17422,10 +17422,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1189; - for (_iter1189 = (*(this->names)).begin(); _iter1189 != (*(this->names)).end(); ++_iter1189) + std::vector ::const_iterator _iter1199; + for (_iter1199 = (*(this->names)).begin(); _iter1199 != (*(this->names)).end(); ++_iter1199) { - xfer += oprot->writeString((*_iter1189)); + xfer += oprot->writeString((*_iter1199)); } xfer += 
oprot->writeListEnd(); } @@ -17466,14 +17466,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1190; - ::apache::thrift::protocol::TType _etype1193; - xfer += iprot->readListBegin(_etype1193, _size1190); - this->success.resize(_size1190); - uint32_t _i1194; - for (_i1194 = 0; _i1194 < _size1190; ++_i1194) + uint32_t _size1200; + ::apache::thrift::protocol::TType _etype1203; + xfer += iprot->readListBegin(_etype1203, _size1200); + this->success.resize(_size1200); + uint32_t _i1204; + for (_i1204 = 0; _i1204 < _size1200; ++_i1204) { - xfer += this->success[_i1194].read(iprot); + xfer += this->success[_i1204].read(iprot); } xfer += iprot->readListEnd(); } @@ -17520,10 +17520,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1195; - for (_iter1195 = this->success.begin(); _iter1195 != this->success.end(); ++_iter1195) + std::vector ::const_iterator _iter1205; + for (_iter1205 = this->success.begin(); _iter1205 != this->success.end(); ++_iter1205) { - xfer += (*_iter1195).write(oprot); + xfer += (*_iter1205).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17572,14 +17572,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1196; - ::apache::thrift::protocol::TType _etype1199; - xfer += iprot->readListBegin(_etype1199, _size1196); - (*(this->success)).resize(_size1196); - uint32_t _i1200; - for (_i1200 = 0; _i1200 < _size1196; ++_i1200) + uint32_t _size1206; + ::apache::thrift::protocol::TType _etype1209; + xfer += iprot->readListBegin(_etype1209, _size1206); + (*(this->success)).resize(_size1206); + uint32_t _i1210; + for (_i1210 = 0; _i1210 < _size1206; ++_i1210) { - xfer += (*(this->success))[_i1200].read(iprot); + xfer += (*(this->success))[_i1210].read(iprot); } xfer += iprot->readListEnd(); } @@ -17901,14 +17901,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1201; - ::apache::thrift::protocol::TType _etype1204; - xfer += iprot->readListBegin(_etype1204, _size1201); - this->new_parts.resize(_size1201); - uint32_t _i1205; - for (_i1205 = 0; _i1205 < _size1201; ++_i1205) + uint32_t _size1211; + ::apache::thrift::protocol::TType _etype1214; + xfer += iprot->readListBegin(_etype1214, _size1211); + this->new_parts.resize(_size1211); + uint32_t _i1215; + for (_i1215 = 0; _i1215 < _size1211; ++_i1215) { - xfer += this->new_parts[_i1205].read(iprot); + xfer += this->new_parts[_i1215].read(iprot); } xfer += iprot->readListEnd(); } @@ -17945,10 +17945,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1206; - for (_iter1206 = this->new_parts.begin(); _iter1206 != this->new_parts.end(); ++_iter1206) + std::vector ::const_iterator _iter1216; + for (_iter1216 = this->new_parts.begin(); _iter1216 
!= this->new_parts.end(); ++_iter1216) { - xfer += (*_iter1206).write(oprot); + xfer += (*_iter1216).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17980,10 +17980,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1207; - for (_iter1207 = (*(this->new_parts)).begin(); _iter1207 != (*(this->new_parts)).end(); ++_iter1207) + std::vector ::const_iterator _iter1217; + for (_iter1217 = (*(this->new_parts)).begin(); _iter1217 != (*(this->new_parts)).end(); ++_iter1217) { - xfer += (*_iter1207).write(oprot); + xfer += (*_iter1217).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18168,14 +18168,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1208; - ::apache::thrift::protocol::TType _etype1211; - xfer += iprot->readListBegin(_etype1211, _size1208); - this->new_parts.resize(_size1208); - uint32_t _i1212; - for (_i1212 = 0; _i1212 < _size1208; ++_i1212) + uint32_t _size1218; + ::apache::thrift::protocol::TType _etype1221; + xfer += iprot->readListBegin(_etype1221, _size1218); + this->new_parts.resize(_size1218); + uint32_t _i1222; + for (_i1222 = 0; _i1222 < _size1218; ++_i1222) { - xfer += this->new_parts[_i1212].read(iprot); + xfer += this->new_parts[_i1222].read(iprot); } xfer += iprot->readListEnd(); } @@ -18220,10 +18220,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1213; - for (_iter1213 = this->new_parts.begin(); _iter1213 != this->new_parts.end(); ++_iter1213) + std::vector ::const_iterator _iter1223; + for (_iter1223 = this->new_parts.begin(); _iter1223 != this->new_parts.end(); ++_iter1223) { - xfer += (*_iter1213).write(oprot); + xfer += (*_iter1223).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18259,10 +18259,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1214; - for (_iter1214 = (*(this->new_parts)).begin(); _iter1214 != (*(this->new_parts)).end(); ++_iter1214) + std::vector ::const_iterator _iter1224; + for (_iter1224 = (*(this->new_parts)).begin(); _iter1224 != (*(this->new_parts)).end(); ++_iter1224) { - xfer += (*_iter1214).write(oprot); + xfer += (*_iter1224).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18706,14 +18706,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1215; - ::apache::thrift::protocol::TType _etype1218; - xfer += iprot->readListBegin(_etype1218, _size1215); - this->part_vals.resize(_size1215); - uint32_t _i1219; - for (_i1219 = 0; _i1219 < _size1215; ++_i1219) + uint32_t _size1225; + ::apache::thrift::protocol::TType _etype1228; + xfer += iprot->readListBegin(_etype1228, _size1225); 
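/*
 * For reference, the pattern every one of these renamed hunks instantiates is
 * the standard Thrift C++ list round-trip. A minimal sketch for a
 * std::vector<std::string> field named names (template arguments and
 * static_cast<uint32_t> shown in full for readability):
 *
 *   // read side: element type and count come off the wire first
 *   uint32_t size;
 *   ::apache::thrift::protocol::TType etype;
 *   xfer += iprot->readListBegin(etype, size);
 *   this->names.resize(size);
 *   for (uint32_t i = 0; i < size; ++i) {
 *     xfer += iprot->readString(this->names[i]);
 *   }
 *   xfer += iprot->readListEnd();
 *
 *   // write side: declare the element type and count, then the elements
 *   xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING,
 *       static_cast<uint32_t>(this->names.size()));
 *   for (std::vector<std::string>::const_iterator it = this->names.begin();
 *        it != this->names.end(); ++it) {
 *     xfer += oprot->writeString(*it);
 *   }
 *   xfer += oprot->writeListEnd();
 */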
+ this->part_vals.resize(_size1225); + uint32_t _i1229; + for (_i1229 = 0; _i1229 < _size1225; ++_i1229) { - xfer += iprot->readString(this->part_vals[_i1219]); + xfer += iprot->readString(this->part_vals[_i1229]); } xfer += iprot->readListEnd(); } @@ -18758,10 +18758,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1220; - for (_iter1220 = this->part_vals.begin(); _iter1220 != this->part_vals.end(); ++_iter1220) + std::vector ::const_iterator _iter1230; + for (_iter1230 = this->part_vals.begin(); _iter1230 != this->part_vals.end(); ++_iter1230) { - xfer += oprot->writeString((*_iter1220)); + xfer += oprot->writeString((*_iter1230)); } xfer += oprot->writeListEnd(); } @@ -18797,10 +18797,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1221; - for (_iter1221 = (*(this->part_vals)).begin(); _iter1221 != (*(this->part_vals)).end(); ++_iter1221) + std::vector ::const_iterator _iter1231; + for (_iter1231 = (*(this->part_vals)).begin(); _iter1231 != (*(this->part_vals)).end(); ++_iter1231) { - xfer += oprot->writeString((*_iter1221)); + xfer += oprot->writeString((*_iter1231)); } xfer += oprot->writeListEnd(); } @@ -18973,14 +18973,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1222; - ::apache::thrift::protocol::TType _etype1225; - xfer += iprot->readListBegin(_etype1225, _size1222); - this->part_vals.resize(_size1222); - uint32_t _i1226; - for (_i1226 = 0; _i1226 < _size1222; ++_i1226) + uint32_t _size1232; + ::apache::thrift::protocol::TType _etype1235; + xfer += iprot->readListBegin(_etype1235, _size1232); + this->part_vals.resize(_size1232); + uint32_t _i1236; + for (_i1236 = 0; _i1236 < _size1232; ++_i1236) { - xfer += iprot->readString(this->part_vals[_i1226]); + xfer += iprot->readString(this->part_vals[_i1236]); } xfer += iprot->readListEnd(); } @@ -19017,10 +19017,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1227; - for (_iter1227 = this->part_vals.begin(); _iter1227 != this->part_vals.end(); ++_iter1227) + std::vector ::const_iterator _iter1237; + for (_iter1237 = this->part_vals.begin(); _iter1237 != this->part_vals.end(); ++_iter1237) { - xfer += oprot->writeString((*_iter1227)); + xfer += oprot->writeString((*_iter1237)); } xfer += oprot->writeListEnd(); } @@ -19048,10 +19048,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1228; - for (_iter1228 = (*(this->part_vals)).begin(); _iter1228 != 
(*(this->part_vals)).end(); ++_iter1228) + std::vector ::const_iterator _iter1238; + for (_iter1238 = (*(this->part_vals)).begin(); _iter1238 != (*(this->part_vals)).end(); ++_iter1238) { - xfer += oprot->writeString((*_iter1228)); + xfer += oprot->writeString((*_iter1238)); } xfer += oprot->writeListEnd(); } @@ -19526,14 +19526,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1229; - ::apache::thrift::protocol::TType _etype1232; - xfer += iprot->readListBegin(_etype1232, _size1229); - this->success.resize(_size1229); - uint32_t _i1233; - for (_i1233 = 0; _i1233 < _size1229; ++_i1233) + uint32_t _size1239; + ::apache::thrift::protocol::TType _etype1242; + xfer += iprot->readListBegin(_etype1242, _size1239); + this->success.resize(_size1239); + uint32_t _i1243; + for (_i1243 = 0; _i1243 < _size1239; ++_i1243) { - xfer += iprot->readString(this->success[_i1233]); + xfer += iprot->readString(this->success[_i1243]); } xfer += iprot->readListEnd(); } @@ -19572,10 +19572,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1234; - for (_iter1234 = this->success.begin(); _iter1234 != this->success.end(); ++_iter1234) + std::vector ::const_iterator _iter1244; + for (_iter1244 = this->success.begin(); _iter1244 != this->success.end(); ++_iter1244) { - xfer += oprot->writeString((*_iter1234)); + xfer += oprot->writeString((*_iter1244)); } xfer += oprot->writeListEnd(); } @@ -19620,14 +19620,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1235; - ::apache::thrift::protocol::TType _etype1238; - xfer += iprot->readListBegin(_etype1238, _size1235); - (*(this->success)).resize(_size1235); - uint32_t _i1239; - for (_i1239 = 0; _i1239 < _size1235; ++_i1239) + uint32_t _size1245; + ::apache::thrift::protocol::TType _etype1248; + xfer += iprot->readListBegin(_etype1248, _size1245); + (*(this->success)).resize(_size1245); + uint32_t _i1249; + for (_i1249 = 0; _i1249 < _size1245; ++_i1249) { - xfer += iprot->readString((*(this->success))[_i1239]); + xfer += iprot->readString((*(this->success))[_i1249]); } xfer += iprot->readListEnd(); } @@ -19765,17 +19765,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1240; - ::apache::thrift::protocol::TType _ktype1241; - ::apache::thrift::protocol::TType _vtype1242; - xfer += iprot->readMapBegin(_ktype1241, _vtype1242, _size1240); - uint32_t _i1244; - for (_i1244 = 0; _i1244 < _size1240; ++_i1244) + uint32_t _size1250; + ::apache::thrift::protocol::TType _ktype1251; + ::apache::thrift::protocol::TType _vtype1252; + xfer += iprot->readMapBegin(_ktype1251, _vtype1252, _size1250); + uint32_t _i1254; + for (_i1254 = 0; _i1254 < _size1250; ++_i1254) { - std::string _key1245; - xfer += iprot->readString(_key1245); - std::string& _val1246 = this->success[_key1245]; - xfer += iprot->readString(_val1246); + std::string _key1255; + xfer += iprot->readString(_key1255); + std::string& _val1256 = this->success[_key1255]; + xfer += 
iprot->readString(_val1256); } xfer += iprot->readMapEnd(); } @@ -19814,11 +19814,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1247; - for (_iter1247 = this->success.begin(); _iter1247 != this->success.end(); ++_iter1247) + std::map ::const_iterator _iter1257; + for (_iter1257 = this->success.begin(); _iter1257 != this->success.end(); ++_iter1257) { - xfer += oprot->writeString(_iter1247->first); - xfer += oprot->writeString(_iter1247->second); + xfer += oprot->writeString(_iter1257->first); + xfer += oprot->writeString(_iter1257->second); } xfer += oprot->writeMapEnd(); } @@ -19863,17 +19863,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1248; - ::apache::thrift::protocol::TType _ktype1249; - ::apache::thrift::protocol::TType _vtype1250; - xfer += iprot->readMapBegin(_ktype1249, _vtype1250, _size1248); - uint32_t _i1252; - for (_i1252 = 0; _i1252 < _size1248; ++_i1252) + uint32_t _size1258; + ::apache::thrift::protocol::TType _ktype1259; + ::apache::thrift::protocol::TType _vtype1260; + xfer += iprot->readMapBegin(_ktype1259, _vtype1260, _size1258); + uint32_t _i1262; + for (_i1262 = 0; _i1262 < _size1258; ++_i1262) { - std::string _key1253; - xfer += iprot->readString(_key1253); - std::string& _val1254 = (*(this->success))[_key1253]; - xfer += iprot->readString(_val1254); + std::string _key1263; + xfer += iprot->readString(_key1263); + std::string& _val1264 = (*(this->success))[_key1263]; + xfer += iprot->readString(_val1264); } xfer += iprot->readMapEnd(); } @@ -19948,17 +19948,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1255; - ::apache::thrift::protocol::TType _ktype1256; - ::apache::thrift::protocol::TType _vtype1257; - xfer += iprot->readMapBegin(_ktype1256, _vtype1257, _size1255); - uint32_t _i1259; - for (_i1259 = 0; _i1259 < _size1255; ++_i1259) + uint32_t _size1265; + ::apache::thrift::protocol::TType _ktype1266; + ::apache::thrift::protocol::TType _vtype1267; + xfer += iprot->readMapBegin(_ktype1266, _vtype1267, _size1265); + uint32_t _i1269; + for (_i1269 = 0; _i1269 < _size1265; ++_i1269) { - std::string _key1260; - xfer += iprot->readString(_key1260); - std::string& _val1261 = this->part_vals[_key1260]; - xfer += iprot->readString(_val1261); + std::string _key1270; + xfer += iprot->readString(_key1270); + std::string& _val1271 = this->part_vals[_key1270]; + xfer += iprot->readString(_val1271); } xfer += iprot->readMapEnd(); } @@ -19969,9 +19969,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1262; - xfer += iprot->readI32(ecast1262); - this->eventType = (PartitionEventType::type)ecast1262; + int32_t ecast1272; + xfer += iprot->readI32(ecast1272); + this->eventType = (PartitionEventType::type)ecast1272; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20005,11 +20005,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += 
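/*
 * The partition_name_to_spec and markPartitionForEvent hunks renumber the
 * matching map round-trip. A minimal sketch for a
 * std::map<std::string, std::string> field named success:
 *
 *   uint32_t size;
 *   ::apache::thrift::protocol::TType ktype, vtype;
 *   xfer += iprot->readMapBegin(ktype, vtype, size);
 *   for (uint32_t i = 0; i < size; ++i) {
 *     std::string key;
 *     xfer += iprot->readString(key);
 *     std::string& val = this->success[key];  // default-construct, then fill in place
 *     xfer += iprot->readString(val);
 *   }
 *   xfer += iprot->readMapEnd();
 */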
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1263; - for (_iter1263 = this->part_vals.begin(); _iter1263 != this->part_vals.end(); ++_iter1263) + std::map ::const_iterator _iter1273; + for (_iter1273 = this->part_vals.begin(); _iter1273 != this->part_vals.end(); ++_iter1273) { - xfer += oprot->writeString(_iter1263->first); - xfer += oprot->writeString(_iter1263->second); + xfer += oprot->writeString(_iter1273->first); + xfer += oprot->writeString(_iter1273->second); } xfer += oprot->writeMapEnd(); } @@ -20045,11 +20045,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1264; - for (_iter1264 = (*(this->part_vals)).begin(); _iter1264 != (*(this->part_vals)).end(); ++_iter1264) + std::map ::const_iterator _iter1274; + for (_iter1274 = (*(this->part_vals)).begin(); _iter1274 != (*(this->part_vals)).end(); ++_iter1274) { - xfer += oprot->writeString(_iter1264->first); - xfer += oprot->writeString(_iter1264->second); + xfer += oprot->writeString(_iter1274->first); + xfer += oprot->writeString(_iter1274->second); } xfer += oprot->writeMapEnd(); } @@ -20318,17 +20318,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1265; - ::apache::thrift::protocol::TType _ktype1266; - ::apache::thrift::protocol::TType _vtype1267; - xfer += iprot->readMapBegin(_ktype1266, _vtype1267, _size1265); - uint32_t _i1269; - for (_i1269 = 0; _i1269 < _size1265; ++_i1269) + uint32_t _size1275; + ::apache::thrift::protocol::TType _ktype1276; + ::apache::thrift::protocol::TType _vtype1277; + xfer += iprot->readMapBegin(_ktype1276, _vtype1277, _size1275); + uint32_t _i1279; + for (_i1279 = 0; _i1279 < _size1275; ++_i1279) { - std::string _key1270; - xfer += iprot->readString(_key1270); - std::string& _val1271 = this->part_vals[_key1270]; - xfer += iprot->readString(_val1271); + std::string _key1280; + xfer += iprot->readString(_key1280); + std::string& _val1281 = this->part_vals[_key1280]; + xfer += iprot->readString(_val1281); } xfer += iprot->readMapEnd(); } @@ -20339,9 +20339,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1272; - xfer += iprot->readI32(ecast1272); - this->eventType = (PartitionEventType::type)ecast1272; + int32_t ecast1282; + xfer += iprot->readI32(ecast1282); + this->eventType = (PartitionEventType::type)ecast1282; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20375,11 +20375,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1273; - for (_iter1273 = this->part_vals.begin(); _iter1273 != this->part_vals.end(); ++_iter1273) + std::map 
::const_iterator _iter1283; + for (_iter1283 = this->part_vals.begin(); _iter1283 != this->part_vals.end(); ++_iter1283) { - xfer += oprot->writeString(_iter1273->first); - xfer += oprot->writeString(_iter1273->second); + xfer += oprot->writeString(_iter1283->first); + xfer += oprot->writeString(_iter1283->second); } xfer += oprot->writeMapEnd(); } @@ -20415,11 +20415,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1274; - for (_iter1274 = (*(this->part_vals)).begin(); _iter1274 != (*(this->part_vals)).end(); ++_iter1274) + std::map ::const_iterator _iter1284; + for (_iter1284 = (*(this->part_vals)).begin(); _iter1284 != (*(this->part_vals)).end(); ++_iter1284) { - xfer += oprot->writeString(_iter1274->first); - xfer += oprot->writeString(_iter1274->second); + xfer += oprot->writeString(_iter1284->first); + xfer += oprot->writeString(_iter1284->second); } xfer += oprot->writeMapEnd(); } @@ -21855,14 +21855,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1275; - ::apache::thrift::protocol::TType _etype1278; - xfer += iprot->readListBegin(_etype1278, _size1275); - this->success.resize(_size1275); - uint32_t _i1279; - for (_i1279 = 0; _i1279 < _size1275; ++_i1279) + uint32_t _size1285; + ::apache::thrift::protocol::TType _etype1288; + xfer += iprot->readListBegin(_etype1288, _size1285); + this->success.resize(_size1285); + uint32_t _i1289; + for (_i1289 = 0; _i1289 < _size1285; ++_i1289) { - xfer += this->success[_i1279].read(iprot); + xfer += this->success[_i1289].read(iprot); } xfer += iprot->readListEnd(); } @@ -21909,10 +21909,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1280; - for (_iter1280 = this->success.begin(); _iter1280 != this->success.end(); ++_iter1280) + std::vector ::const_iterator _iter1290; + for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) { - xfer += (*_iter1280).write(oprot); + xfer += (*_iter1290).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21961,14 +21961,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1281; - ::apache::thrift::protocol::TType _etype1284; - xfer += iprot->readListBegin(_etype1284, _size1281); - (*(this->success)).resize(_size1281); - uint32_t _i1285; - for (_i1285 = 0; _i1285 < _size1281; ++_i1285) + uint32_t _size1291; + ::apache::thrift::protocol::TType _etype1294; + xfer += iprot->readListBegin(_etype1294, _size1291); + (*(this->success)).resize(_size1291); + uint32_t _i1295; + for (_i1295 = 0; _i1295 < _size1291; ++_i1295) { - xfer += (*(this->success))[_i1285].read(iprot); + xfer += (*(this->success))[_i1295].read(iprot); } xfer += iprot->readListEnd(); } @@ -22146,14 +22146,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1286; - ::apache::thrift::protocol::TType _etype1289; - xfer += iprot->readListBegin(_etype1289, _size1286); - this->success.resize(_size1286); - uint32_t _i1290; - for (_i1290 = 0; _i1290 < _size1286; ++_i1290) + uint32_t _size1296; + ::apache::thrift::protocol::TType _etype1299; + xfer += iprot->readListBegin(_etype1299, _size1296); + this->success.resize(_size1296); + uint32_t _i1300; + for (_i1300 = 0; _i1300 < _size1296; ++_i1300) { - xfer += iprot->readString(this->success[_i1290]); + xfer += iprot->readString(this->success[_i1300]); } xfer += iprot->readListEnd(); } @@ -22192,10 +22192,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1291; - for (_iter1291 = this->success.begin(); _iter1291 != this->success.end(); ++_iter1291) + std::vector ::const_iterator _iter1301; + for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) { - xfer += oprot->writeString((*_iter1291)); + xfer += oprot->writeString((*_iter1301)); } xfer += oprot->writeListEnd(); } @@ -22240,14 +22240,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1292; - ::apache::thrift::protocol::TType _etype1295; - xfer += iprot->readListBegin(_etype1295, _size1292); - (*(this->success)).resize(_size1292); - uint32_t _i1296; - for (_i1296 = 0; _i1296 < _size1292; ++_i1296) + uint32_t _size1302; + ::apache::thrift::protocol::TType _etype1305; + xfer += iprot->readListBegin(_etype1305, _size1302); + (*(this->success)).resize(_size1302); + uint32_t _i1306; + for (_i1306 = 0; _i1306 < _size1302; ++_i1306) { - xfer += iprot->readString((*(this->success))[_i1296]); + xfer += iprot->readString((*(this->success))[_i1306]); } xfer += iprot->readListEnd(); } @@ -26274,14 +26274,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1297; - ::apache::thrift::protocol::TType _etype1300; - xfer += iprot->readListBegin(_etype1300, _size1297); - this->success.resize(_size1297); - uint32_t _i1301; - for (_i1301 = 0; _i1301 < _size1297; ++_i1301) + uint32_t _size1307; + ::apache::thrift::protocol::TType _etype1310; + xfer += iprot->readListBegin(_etype1310, _size1307); + this->success.resize(_size1307); + uint32_t _i1311; + for (_i1311 = 0; _i1311 < _size1307; ++_i1311) { - xfer += iprot->readString(this->success[_i1301]); + xfer += iprot->readString(this->success[_i1311]); } xfer += iprot->readListEnd(); } @@ -26320,10 +26320,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1302; - for (_iter1302 = this->success.begin(); _iter1302 != this->success.end(); ++_iter1302) + std::vector ::const_iterator _iter1312; + for (_iter1312 = this->success.begin(); _iter1312 != this->success.end(); ++_iter1312) { - xfer += oprot->writeString((*_iter1302)); + xfer += 
oprot->writeString((*_iter1312)); } xfer += oprot->writeListEnd(); } @@ -26368,14 +26368,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1303; - ::apache::thrift::protocol::TType _etype1306; - xfer += iprot->readListBegin(_etype1306, _size1303); - (*(this->success)).resize(_size1303); - uint32_t _i1307; - for (_i1307 = 0; _i1307 < _size1303; ++_i1307) + uint32_t _size1313; + ::apache::thrift::protocol::TType _etype1316; + xfer += iprot->readListBegin(_etype1316, _size1313); + (*(this->success)).resize(_size1313); + uint32_t _i1317; + for (_i1317 = 0; _i1317 < _size1313; ++_i1317) { - xfer += iprot->readString((*(this->success))[_i1307]); + xfer += iprot->readString((*(this->success))[_i1317]); } xfer += iprot->readListEnd(); } @@ -27335,14 +27335,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1308; - ::apache::thrift::protocol::TType _etype1311; - xfer += iprot->readListBegin(_etype1311, _size1308); - this->success.resize(_size1308); - uint32_t _i1312; - for (_i1312 = 0; _i1312 < _size1308; ++_i1312) + uint32_t _size1318; + ::apache::thrift::protocol::TType _etype1321; + xfer += iprot->readListBegin(_etype1321, _size1318); + this->success.resize(_size1318); + uint32_t _i1322; + for (_i1322 = 0; _i1322 < _size1318; ++_i1322) { - xfer += iprot->readString(this->success[_i1312]); + xfer += iprot->readString(this->success[_i1322]); } xfer += iprot->readListEnd(); } @@ -27381,10 +27381,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1313; - for (_iter1313 = this->success.begin(); _iter1313 != this->success.end(); ++_iter1313) + std::vector ::const_iterator _iter1323; + for (_iter1323 = this->success.begin(); _iter1323 != this->success.end(); ++_iter1323) { - xfer += oprot->writeString((*_iter1313)); + xfer += oprot->writeString((*_iter1323)); } xfer += oprot->writeListEnd(); } @@ -27429,14 +27429,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1314; - ::apache::thrift::protocol::TType _etype1317; - xfer += iprot->readListBegin(_etype1317, _size1314); - (*(this->success)).resize(_size1314); - uint32_t _i1318; - for (_i1318 = 0; _i1318 < _size1314; ++_i1318) + uint32_t _size1324; + ::apache::thrift::protocol::TType _etype1327; + xfer += iprot->readListBegin(_etype1327, _size1324); + (*(this->success)).resize(_size1324); + uint32_t _i1328; + for (_i1328 = 0; _i1328 < _size1324; ++_i1328) { - xfer += iprot->readString((*(this->success))[_i1318]); + xfer += iprot->readString((*(this->success))[_i1328]); } xfer += iprot->readListEnd(); } @@ -27509,9 +27509,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1319; - xfer += iprot->readI32(ecast1319); - this->principal_type = (PrincipalType::type)ecast1319; + int32_t ecast1329; + xfer += iprot->readI32(ecast1329); + this->principal_type = (PrincipalType::type)ecast1329; 
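/*
 * The ecast hunks nearby follow Thrift's enum convention: enums travel as
 * plain i32 on the wire and are cast back on read, with no range check in the
 * generated code, so a value from a newer peer survives deserialization.
 * Sketch:
 *
 *   int32_t raw;
 *   xfer += iprot->readI32(raw);
 *   this->principal_type = (PrincipalType::type)raw;
 *   this->__isset.principal_type = true;
 */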
this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27527,9 +27527,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1320; - xfer += iprot->readI32(ecast1320); - this->grantorType = (PrincipalType::type)ecast1320; + int32_t ecast1330; + xfer += iprot->readI32(ecast1330); + this->grantorType = (PrincipalType::type)ecast1330; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -27800,9 +27800,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1321; - xfer += iprot->readI32(ecast1321); - this->principal_type = (PrincipalType::type)ecast1321; + int32_t ecast1331; + xfer += iprot->readI32(ecast1331); + this->principal_type = (PrincipalType::type)ecast1331; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28033,9 +28033,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1322; - xfer += iprot->readI32(ecast1322); - this->principal_type = (PrincipalType::type)ecast1322; + int32_t ecast1332; + xfer += iprot->readI32(ecast1332); + this->principal_type = (PrincipalType::type)ecast1332; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28124,14 +28124,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1323; - ::apache::thrift::protocol::TType _etype1326; - xfer += iprot->readListBegin(_etype1326, _size1323); - this->success.resize(_size1323); - uint32_t _i1327; - for (_i1327 = 0; _i1327 < _size1323; ++_i1327) + uint32_t _size1333; + ::apache::thrift::protocol::TType _etype1336; + xfer += iprot->readListBegin(_etype1336, _size1333); + this->success.resize(_size1333); + uint32_t _i1337; + for (_i1337 = 0; _i1337 < _size1333; ++_i1337) { - xfer += this->success[_i1327].read(iprot); + xfer += this->success[_i1337].read(iprot); } xfer += iprot->readListEnd(); } @@ -28170,10 +28170,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1328; - for (_iter1328 = this->success.begin(); _iter1328 != this->success.end(); ++_iter1328) + std::vector ::const_iterator _iter1338; + for (_iter1338 = this->success.begin(); _iter1338 != this->success.end(); ++_iter1338) { - xfer += (*_iter1328).write(oprot); + xfer += (*_iter1338).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28218,14 +28218,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1329; - ::apache::thrift::protocol::TType _etype1332; - xfer += iprot->readListBegin(_etype1332, _size1329); - (*(this->success)).resize(_size1329); - uint32_t _i1333; - for (_i1333 = 0; _i1333 < _size1329; ++_i1333) + uint32_t _size1339; + ::apache::thrift::protocol::TType _etype1342; + xfer += iprot->readListBegin(_etype1342, _size1339); + (*(this->success)).resize(_size1339); + uint32_t _i1343; + for (_i1343 = 0; _i1343 < 
_size1339; ++_i1343) { - xfer += (*(this->success))[_i1333].read(iprot); + xfer += (*(this->success))[_i1343].read(iprot); } xfer += iprot->readListEnd(); } @@ -28921,14 +28921,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1334; - ::apache::thrift::protocol::TType _etype1337; - xfer += iprot->readListBegin(_etype1337, _size1334); - this->group_names.resize(_size1334); - uint32_t _i1338; - for (_i1338 = 0; _i1338 < _size1334; ++_i1338) + uint32_t _size1344; + ::apache::thrift::protocol::TType _etype1347; + xfer += iprot->readListBegin(_etype1347, _size1344); + this->group_names.resize(_size1344); + uint32_t _i1348; + for (_i1348 = 0; _i1348 < _size1344; ++_i1348) { - xfer += iprot->readString(this->group_names[_i1338]); + xfer += iprot->readString(this->group_names[_i1348]); } xfer += iprot->readListEnd(); } @@ -28965,10 +28965,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1339; - for (_iter1339 = this->group_names.begin(); _iter1339 != this->group_names.end(); ++_iter1339) + std::vector ::const_iterator _iter1349; + for (_iter1349 = this->group_names.begin(); _iter1349 != this->group_names.end(); ++_iter1349) { - xfer += oprot->writeString((*_iter1339)); + xfer += oprot->writeString((*_iter1349)); } xfer += oprot->writeListEnd(); } @@ -29000,10 +29000,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1340; - for (_iter1340 = (*(this->group_names)).begin(); _iter1340 != (*(this->group_names)).end(); ++_iter1340) + std::vector ::const_iterator _iter1350; + for (_iter1350 = (*(this->group_names)).begin(); _iter1350 != (*(this->group_names)).end(); ++_iter1350) { - xfer += oprot->writeString((*_iter1340)); + xfer += oprot->writeString((*_iter1350)); } xfer += oprot->writeListEnd(); } @@ -29178,9 +29178,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1341; - xfer += iprot->readI32(ecast1341); - this->principal_type = (PrincipalType::type)ecast1341; + int32_t ecast1351; + xfer += iprot->readI32(ecast1351); + this->principal_type = (PrincipalType::type)ecast1351; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29285,14 +29285,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1342; - ::apache::thrift::protocol::TType _etype1345; - xfer += iprot->readListBegin(_etype1345, _size1342); - this->success.resize(_size1342); - uint32_t _i1346; - for (_i1346 = 0; _i1346 < _size1342; ++_i1346) + uint32_t _size1352; + ::apache::thrift::protocol::TType _etype1355; + xfer += iprot->readListBegin(_etype1355, _size1352); + this->success.resize(_size1352); + uint32_t _i1356; + for (_i1356 = 0; _i1356 < _size1352; ++_i1356) { - xfer += this->success[_i1346].read(iprot); + 
xfer += this->success[_i1356].read(iprot); } xfer += iprot->readListEnd(); } @@ -29331,10 +29331,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1347; - for (_iter1347 = this->success.begin(); _iter1347 != this->success.end(); ++_iter1347) + std::vector ::const_iterator _iter1357; + for (_iter1357 = this->success.begin(); _iter1357 != this->success.end(); ++_iter1357) { - xfer += (*_iter1347).write(oprot); + xfer += (*_iter1357).write(oprot); } xfer += oprot->writeListEnd(); } @@ -29379,14 +29379,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1348; - ::apache::thrift::protocol::TType _etype1351; - xfer += iprot->readListBegin(_etype1351, _size1348); - (*(this->success)).resize(_size1348); - uint32_t _i1352; - for (_i1352 = 0; _i1352 < _size1348; ++_i1352) + uint32_t _size1358; + ::apache::thrift::protocol::TType _etype1361; + xfer += iprot->readListBegin(_etype1361, _size1358); + (*(this->success)).resize(_size1358); + uint32_t _i1362; + for (_i1362 = 0; _i1362 < _size1358; ++_i1362) { - xfer += (*(this->success))[_i1352].read(iprot); + xfer += (*(this->success))[_i1362].read(iprot); } xfer += iprot->readListEnd(); } @@ -30074,14 +30074,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1353; - ::apache::thrift::protocol::TType _etype1356; - xfer += iprot->readListBegin(_etype1356, _size1353); - this->group_names.resize(_size1353); - uint32_t _i1357; - for (_i1357 = 0; _i1357 < _size1353; ++_i1357) + uint32_t _size1363; + ::apache::thrift::protocol::TType _etype1366; + xfer += iprot->readListBegin(_etype1366, _size1363); + this->group_names.resize(_size1363); + uint32_t _i1367; + for (_i1367 = 0; _i1367 < _size1363; ++_i1367) { - xfer += iprot->readString(this->group_names[_i1357]); + xfer += iprot->readString(this->group_names[_i1367]); } xfer += iprot->readListEnd(); } @@ -30114,10 +30114,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1358; - for (_iter1358 = this->group_names.begin(); _iter1358 != this->group_names.end(); ++_iter1358) + std::vector ::const_iterator _iter1368; + for (_iter1368 = this->group_names.begin(); _iter1368 != this->group_names.end(); ++_iter1368) { - xfer += oprot->writeString((*_iter1358)); + xfer += oprot->writeString((*_iter1368)); } xfer += oprot->writeListEnd(); } @@ -30145,10 +30145,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1359; - for (_iter1359 = (*(this->group_names)).begin(); _iter1359 != (*(this->group_names)).end(); ++_iter1359) + std::vector ::const_iterator _iter1369; + for 
(_iter1369 = (*(this->group_names)).begin(); _iter1369 != (*(this->group_names)).end(); ++_iter1369) { - xfer += oprot->writeString((*_iter1359)); + xfer += oprot->writeString((*_iter1369)); } xfer += oprot->writeListEnd(); } @@ -30189,14 +30189,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1360; - ::apache::thrift::protocol::TType _etype1363; - xfer += iprot->readListBegin(_etype1363, _size1360); - this->success.resize(_size1360); - uint32_t _i1364; - for (_i1364 = 0; _i1364 < _size1360; ++_i1364) + uint32_t _size1370; + ::apache::thrift::protocol::TType _etype1373; + xfer += iprot->readListBegin(_etype1373, _size1370); + this->success.resize(_size1370); + uint32_t _i1374; + for (_i1374 = 0; _i1374 < _size1370; ++_i1374) { - xfer += iprot->readString(this->success[_i1364]); + xfer += iprot->readString(this->success[_i1374]); } xfer += iprot->readListEnd(); } @@ -30235,10 +30235,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1365; - for (_iter1365 = this->success.begin(); _iter1365 != this->success.end(); ++_iter1365) + std::vector ::const_iterator _iter1375; + for (_iter1375 = this->success.begin(); _iter1375 != this->success.end(); ++_iter1375) { - xfer += oprot->writeString((*_iter1365)); + xfer += oprot->writeString((*_iter1375)); } xfer += oprot->writeListEnd(); } @@ -30283,14 +30283,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1366; - ::apache::thrift::protocol::TType _etype1369; - xfer += iprot->readListBegin(_etype1369, _size1366); - (*(this->success)).resize(_size1366); - uint32_t _i1370; - for (_i1370 = 0; _i1370 < _size1366; ++_i1370) + uint32_t _size1376; + ::apache::thrift::protocol::TType _etype1379; + xfer += iprot->readListBegin(_etype1379, _size1376); + (*(this->success)).resize(_size1376); + uint32_t _i1380; + for (_i1380 = 0; _i1380 < _size1376; ++_i1380) { - xfer += iprot->readString((*(this->success))[_i1370]); + xfer += iprot->readString((*(this->success))[_i1380]); } xfer += iprot->readListEnd(); } @@ -31601,14 +31601,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1371; - ::apache::thrift::protocol::TType _etype1374; - xfer += iprot->readListBegin(_etype1374, _size1371); - this->success.resize(_size1371); - uint32_t _i1375; - for (_i1375 = 0; _i1375 < _size1371; ++_i1375) + uint32_t _size1381; + ::apache::thrift::protocol::TType _etype1384; + xfer += iprot->readListBegin(_etype1384, _size1381); + this->success.resize(_size1381); + uint32_t _i1385; + for (_i1385 = 0; _i1385 < _size1381; ++_i1385) { - xfer += iprot->readString(this->success[_i1375]); + xfer += iprot->readString(this->success[_i1385]); } xfer += iprot->readListEnd(); } @@ -31639,10 +31639,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter1376; - for (_iter1376 = this->success.begin(); _iter1376 != this->success.end(); ++_iter1376) + std::vector ::const_iterator _iter1386; + for (_iter1386 = this->success.begin(); _iter1386 != this->success.end(); ++_iter1386) { - xfer += oprot->writeString((*_iter1376)); + xfer += oprot->writeString((*_iter1386)); } xfer += oprot->writeListEnd(); } @@ -31683,14 +31683,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1377; - ::apache::thrift::protocol::TType _etype1380; - xfer += iprot->readListBegin(_etype1380, _size1377); - (*(this->success)).resize(_size1377); - uint32_t _i1381; - for (_i1381 = 0; _i1381 < _size1377; ++_i1381) + uint32_t _size1387; + ::apache::thrift::protocol::TType _etype1390; + xfer += iprot->readListBegin(_etype1390, _size1387); + (*(this->success)).resize(_size1387); + uint32_t _i1391; + for (_i1391 = 0; _i1391 < _size1387; ++_i1391) { - xfer += iprot->readString((*(this->success))[_i1381]); + xfer += iprot->readString((*(this->success))[_i1391]); } xfer += iprot->readListEnd(); } @@ -32416,14 +32416,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1382; - ::apache::thrift::protocol::TType _etype1385; - xfer += iprot->readListBegin(_etype1385, _size1382); - this->success.resize(_size1382); - uint32_t _i1386; - for (_i1386 = 0; _i1386 < _size1382; ++_i1386) + uint32_t _size1392; + ::apache::thrift::protocol::TType _etype1395; + xfer += iprot->readListBegin(_etype1395, _size1392); + this->success.resize(_size1392); + uint32_t _i1396; + for (_i1396 = 0; _i1396 < _size1392; ++_i1396) { - xfer += iprot->readString(this->success[_i1386]); + xfer += iprot->readString(this->success[_i1396]); } xfer += iprot->readListEnd(); } @@ -32454,10 +32454,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1387; - for (_iter1387 = this->success.begin(); _iter1387 != this->success.end(); ++_iter1387) + std::vector ::const_iterator _iter1397; + for (_iter1397 = this->success.begin(); _iter1397 != this->success.end(); ++_iter1397) { - xfer += oprot->writeString((*_iter1387)); + xfer += oprot->writeString((*_iter1397)); } xfer += oprot->writeListEnd(); } @@ -32498,14 +32498,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1388; - ::apache::thrift::protocol::TType _etype1391; - xfer += iprot->readListBegin(_etype1391, _size1388); - (*(this->success)).resize(_size1388); - uint32_t _i1392; - for (_i1392 = 0; _i1392 < _size1388; ++_i1392) + uint32_t _size1398; + ::apache::thrift::protocol::TType _etype1401; + xfer += iprot->readListBegin(_etype1401, _size1398); + (*(this->success)).resize(_size1398); + uint32_t _i1402; + for (_i1402 = 0; _i1402 < _size1398; ++_i1402) { - xfer += iprot->readString((*(this->success))[_i1392]); + xfer += iprot->readString((*(this->success))[_i1402]); } xfer += iprot->readListEnd(); } @@ -37629,6 +37629,193 @@ uint32_t 
ThriftHiveMetastore_heartbeat_write_id_presult::read(::apache::thrift:: return xfer; } + +ThriftHiveMetastore_get_valid_write_ids_args::~ThriftHiveMetastore_get_valid_write_ids_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_valid_write_ids_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_pargs::~ThriftHiveMetastore_get_valid_write_ids_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_result::~ThriftHiveMetastore_get_valid_write_ids_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_valid_write_ids_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_result"); + + if (this->__isset.success) { + xfer += 
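/*
 * The read() loops of these new get_valid_write_ids structs use the standard
 * Thrift compatibility skeleton: read fields until T_STOP, dispatch on the
 * numeric field id, and iprot->skip(ftype) anything unrecognized or mistyped.
 * That skip is what lets old and new clients and servers exchange these
 * structs safely as fields are added over time.
 */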
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_presult::~ThriftHiveMetastore_get_valid_write_ids_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) { send_getMetaConf(key); @@ -47247,6 +47434,64 @@ void ThriftHiveMetastoreClient::recv_heartbeat_write_id(HeartbeatWriteIdResult& throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_write_id failed: unknown result"); } +void ThriftHiveMetastoreClient::get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) +{ + send_get_valid_write_ids(req); + recv_get_valid_write_ids(_return); +} + +void ThriftHiveMetastoreClient::send_get_valid_write_ids(const GetValidWriteIdsRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_valid_write_ids_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_valid_write_ids(GetValidWriteIdsResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_valid_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_valid_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_valid_write_ids failed: unknown result"); +} + bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { ProcessMap::iterator pfn; pfn = processMap_.find(fname); @@ -56246,6 +56491,60 @@ void ThriftHiveMetastoreProcessor::process_heartbeat_write_id(int32_t seqid, ::a } } +void ThriftHiveMetastoreProcessor::process_get_valid_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_valid_write_ids", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_valid_write_ids"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_valid_write_ids"); + } + + ThriftHiveMetastore_get_valid_write_ids_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_valid_write_ids", bytes); + } + + ThriftHiveMetastore_get_valid_write_ids_result result; + try { + iface_->get_valid_write_ids(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_valid_write_ids"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_valid_write_ids"); + } + + oprot->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_valid_write_ids", bytes); + } +} + ::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); @@ -70060,5 +70359,89 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_write_id(HeartbeatWrite } // end while(true) } +void ThriftHiveMetastoreConcurrentClient::get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) +{ + int32_t seqid = send_get_valid_write_ids(req); + recv_get_valid_write_ids(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_valid_write_ids(const GetValidWriteIdsRequest& req) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_valid_write_ids_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + 
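/*
 * Server-side, process_get_valid_write_ids is registered in processMap_, so
 * dispatchCall routes by method name with no per-method code in the processor
 * loop, and a handler exception is converted into a T_EXCEPTION reply instead
 * of tearing down the transport. The concurrent client below layers
 * sequence-id matching on the same wire format: each call takes a seqid from
 * sync_.generateSeqId(), and recv_ loops until a reply carrying its own seqid
 * arrives, parking out-of-order replies via updatePending()/waitForWork() so
 * multiple threads can share one connection.
 */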
return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_valid_write_ids(GetValidWriteIdsResult& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_valid_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_valid_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_valid_write_ids failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + }}} // namespace diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index df555ec81021..573ec6bd0121 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -174,6 +174,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) = 0; virtual void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) = 0; virtual void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) = 0; + virtual void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -687,6 +688,9 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void heartbeat_write_id(HeartbeatWriteIdResult& /* _return */, const HeartbeatWriteIdRequest& /* req */) { return; } + void get_valid_write_ids(GetValidWriteIdsResult& /* _return */, const GetValidWriteIdsRequest& /* req */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -19455,6 +19459,110 @@ class ThriftHiveMetastore_heartbeat_write_id_presult { }; +typedef struct _ThriftHiveMetastore_get_valid_write_ids_args__isset { + 
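/*
 * The four helper classes declared below follow the generator's naming
 * convention: _args owns its request and serves the server-side read; _pargs
 * holds a const pointer so the client can serialize a caller-owned request
 * without copying it; _result owns the response for the server-side write;
 * _presult holds a bare pointer so the client can deserialize straight into
 * the caller's _return. Only _args and _result are full value types with
 * operator==; the p-variants are one-direction shims.
 */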
_ThriftHiveMetastore_get_valid_write_ids_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_valid_write_ids_args__isset; + +class ThriftHiveMetastore_get_valid_write_ids_args { + public: + + ThriftHiveMetastore_get_valid_write_ids_args(const ThriftHiveMetastore_get_valid_write_ids_args&); + ThriftHiveMetastore_get_valid_write_ids_args& operator=(const ThriftHiveMetastore_get_valid_write_ids_args&); + ThriftHiveMetastore_get_valid_write_ids_args() { + } + + virtual ~ThriftHiveMetastore_get_valid_write_ids_args() throw(); + GetValidWriteIdsRequest req; + + _ThriftHiveMetastore_get_valid_write_ids_args__isset __isset; + + void __set_req(const GetValidWriteIdsRequest& val); + + bool operator == (const ThriftHiveMetastore_get_valid_write_ids_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_valid_write_ids_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_valid_write_ids_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_valid_write_ids_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_valid_write_ids_pargs() throw(); + const GetValidWriteIdsRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_valid_write_ids_result__isset { + _ThriftHiveMetastore_get_valid_write_ids_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_valid_write_ids_result__isset; + +class ThriftHiveMetastore_get_valid_write_ids_result { + public: + + ThriftHiveMetastore_get_valid_write_ids_result(const ThriftHiveMetastore_get_valid_write_ids_result&); + ThriftHiveMetastore_get_valid_write_ids_result& operator=(const ThriftHiveMetastore_get_valid_write_ids_result&); + ThriftHiveMetastore_get_valid_write_ids_result() { + } + + virtual ~ThriftHiveMetastore_get_valid_write_ids_result() throw(); + GetValidWriteIdsResult success; + + _ThriftHiveMetastore_get_valid_write_ids_result__isset __isset; + + void __set_success(const GetValidWriteIdsResult& val); + + bool operator == (const ThriftHiveMetastore_get_valid_write_ids_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_valid_write_ids_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_valid_write_ids_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_valid_write_ids_presult__isset { + _ThriftHiveMetastore_get_valid_write_ids_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_valid_write_ids_presult__isset; + +class ThriftHiveMetastore_get_valid_write_ids_presult { + public: + + + virtual ~ThriftHiveMetastore_get_valid_write_ids_presult() throw(); + GetValidWriteIdsResult* success; + + _ThriftHiveMetastore_get_valid_write_ids_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { public: ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : @@ -19922,6 
+20030,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); void send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return); + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req); + void send_get_valid_write_ids(const GetValidWriteIdsRequest& req); + void recv_get_valid_write_ids(GetValidWriteIdsResult& _return); }; class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor { @@ -20084,6 +20195,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_next_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_valid_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); public: ThriftHiveMetastoreProcessor(boost::shared_ptr iface) : ::facebook::fb303::FacebookServiceProcessor(iface), @@ -20240,6 +20352,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_next_write_id"] = &ThriftHiveMetastoreProcessor::process_get_next_write_id; processMap_["finalize_write_id"] = &ThriftHiveMetastoreProcessor::process_finalize_write_id; processMap_["heartbeat_write_id"] = &ThriftHiveMetastoreProcessor::process_heartbeat_write_id; + processMap_["get_valid_write_ids"] = &ThriftHiveMetastoreProcessor::process_get_valid_write_ids; } virtual ~ThriftHiveMetastoreProcessor() {} @@ -21730,6 +21843,16 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_valid_write_ids(_return, req); + } + ifaces_[i]->get_valid_write_ids(_return, req); + return; + } + }; // The 'concurrent' client is a thread safe client that correctly handles @@ -22202,6 +22325,9 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); int32_t send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid); + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req); + int32_t send_get_valid_write_ids(const GetValidWriteIdsRequest& req); + void recv_get_valid_write_ids(GetValidWriteIdsResult& _return, const int32_t seqid); }; #ifdef _WIN32 diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 317598e80ea9..f938da4baec9 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -782,6 +782,11 @@ class ThriftHiveMetastoreHandler : 
virtual public ThriftHiveMetastoreIf { printf("heartbeat_write_id\n"); } + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) { + // Your implementation goes here + printf("get_valid_write_ids\n"); + } + }; int main(int argc, char **argv) { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index bdfa35bcd58b..356477efc526 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -18260,6 +18260,291 @@ void HeartbeatWriteIdResult::printTo(std::ostream& out) const { } +GetValidWriteIdsRequest::~GetValidWriteIdsRequest() throw() { +} + + +void GetValidWriteIdsRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void GetValidWriteIdsRequest::__set_tblName(const std::string& val) { + this->tblName = val; +} + +uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tblName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tblName); + isset_tblName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tblName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetValidWriteIdsRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tblName, b.tblName); +} + +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other751) { + dbName = other751.dbName; + tblName = other751.tblName; +} +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other752) { + dbName = other752.dbName; + tblName = other752.tblName; + return *this; +} +void GetValidWriteIdsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetValidWriteIdsRequest("; + out << "dbName=" << to_string(dbName); + out 
<< ", " << "tblName=" << to_string(tblName); + out << ")"; +} + + +GetValidWriteIdsResult::~GetValidWriteIdsResult() throw() { +} + + +void GetValidWriteIdsResult::__set_lowWatermarkId(const int64_t val) { + this->lowWatermarkId = val; +} + +void GetValidWriteIdsResult::__set_highWatermarkId(const int64_t val) { + this->highWatermarkId = val; +} + +void GetValidWriteIdsResult::__set_areIdsValid(const bool val) { + this->areIdsValid = val; +__isset.areIdsValid = true; +} + +void GetValidWriteIdsResult::__set_ids(const std::vector & val) { + this->ids = val; +__isset.ids = true; +} + +uint32_t GetValidWriteIdsResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_lowWatermarkId = false; + bool isset_highWatermarkId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->lowWatermarkId); + isset_lowWatermarkId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->highWatermarkId); + isset_highWatermarkId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->areIdsValid); + this->__isset.areIdsValid = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->ids.clear(); + uint32_t _size753; + ::apache::thrift::protocol::TType _etype756; + xfer += iprot->readListBegin(_etype756, _size753); + this->ids.resize(_size753); + uint32_t _i757; + for (_i757 = 0; _i757 < _size753; ++_i757) + { + xfer += iprot->readI64(this->ids[_i757]); + } + xfer += iprot->readListEnd(); + } + this->__isset.ids = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_lowWatermarkId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_highWatermarkId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetValidWriteIdsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetValidWriteIdsResult"); + + xfer += oprot->writeFieldBegin("lowWatermarkId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->lowWatermarkId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("highWatermarkId", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->highWatermarkId); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.areIdsValid) { + xfer += oprot->writeFieldBegin("areIdsValid", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->areIdsValid); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.ids) { + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_LIST, 4); + { + xfer += 
+uint32_t GetValidWriteIdsResult::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("GetValidWriteIdsResult");
+
+  xfer += oprot->writeFieldBegin("lowWatermarkId", ::apache::thrift::protocol::T_I64, 1);
+  xfer += oprot->writeI64(this->lowWatermarkId);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("highWatermarkId", ::apache::thrift::protocol::T_I64, 2);
+  xfer += oprot->writeI64(this->highWatermarkId);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.areIdsValid) {
+    xfer += oprot->writeFieldBegin("areIdsValid", ::apache::thrift::protocol::T_BOOL, 3);
+    xfer += oprot->writeBool(this->areIdsValid);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.ids) {
+    xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_LIST, 4);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->ids.size()));
+      std::vector<int64_t> ::const_iterator _iter758;
+      for (_iter758 = this->ids.begin(); _iter758 != this->ids.end(); ++_iter758)
+      {
+        xfer += oprot->writeI64((*_iter758));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b) {
+  using ::std::swap;
+  swap(a.lowWatermarkId, b.lowWatermarkId);
+  swap(a.highWatermarkId, b.highWatermarkId);
+  swap(a.areIdsValid, b.areIdsValid);
+  swap(a.ids, b.ids);
+  swap(a.__isset, b.__isset);
+}
+
+GetValidWriteIdsResult::GetValidWriteIdsResult(const GetValidWriteIdsResult& other759) {
+  lowWatermarkId = other759.lowWatermarkId;
+  highWatermarkId = other759.highWatermarkId;
+  areIdsValid = other759.areIdsValid;
+  ids = other759.ids;
+  __isset = other759.__isset;
+}
+GetValidWriteIdsResult& GetValidWriteIdsResult::operator=(const GetValidWriteIdsResult& other760) {
+  lowWatermarkId = other760.lowWatermarkId;
+  highWatermarkId = other760.highWatermarkId;
+  areIdsValid = other760.areIdsValid;
+  ids = other760.ids;
+  __isset = other760.__isset;
+  return *this;
+}
+void GetValidWriteIdsResult::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "GetValidWriteIdsResult(";
+  out << "lowWatermarkId=" << to_string(lowWatermarkId);
+  out << ", " << "highWatermarkId=" << to_string(highWatermarkId);
+  out << ", " << "areIdsValid="; (__isset.areIdsValid ? (out << to_string(areIdsValid)) : (out << "<null>"));
+  out << ", " << "ids="; (__isset.ids ? (out << to_string(ids)) : (out << "<null>"));
+  out << ")";
+}
+
 GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() {
 }
 
@@ -18294,14 +18579,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip
       if (ftype == ::apache::thrift::protocol::T_LIST) {
         {
           this->functions.clear();
-          uint32_t _size751;
-          ::apache::thrift::protocol::TType _etype754;
-          xfer += iprot->readListBegin(_etype754, _size751);
-          this->functions.resize(_size751);
-          uint32_t _i755;
-          for (_i755 = 0; _i755 < _size751; ++_i755)
+          uint32_t _size761;
+          ::apache::thrift::protocol::TType _etype764;
+          xfer += iprot->readListBegin(_etype764, _size761);
+          this->functions.resize(_size761);
+          uint32_t _i765;
+          for (_i765 = 0; _i765 < _size761; ++_i765)
           {
-            xfer += this->functions[_i755].read(iprot);
+            xfer += this->functions[_i765].read(iprot);
           }
           xfer += iprot->readListEnd();
         }
@@ -18331,10 +18616,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o
   xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1);
   {
     xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->functions.size()));
-    std::vector<Function> ::const_iterator _iter756;
-    for (_iter756 = this->functions.begin(); _iter756 != this->functions.end(); ++_iter756)
+    std::vector<Function> ::const_iterator _iter766;
+    for (_iter766 = this->functions.begin(); _iter766 != this->functions.end(); ++_iter766)
     {
-      xfer += (*_iter756).write(oprot);
+      xfer += (*_iter766).write(oprot);
     }
     xfer += oprot->writeListEnd();
   }
@@ -18351,13 +18636,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) {
   swap(a.__isset, b.__isset);
 }
 
-GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other757) {
-  functions = other757.functions;
-  __isset = other757.__isset;
+GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other767) { + functions = other767.functions; + __isset = other767.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other758) { - functions = other758.functions; - __isset = other758.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other768) { + functions = other768.functions; + __isset = other768.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -18499,19 +18784,19 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other759) { - dbName = other759.dbName; - tableName = other759.tableName; - tableType = other759.tableType; - comments = other759.comments; - __isset = other759.__isset; +TableMeta::TableMeta(const TableMeta& other769) { + dbName = other769.dbName; + tableName = other769.tableName; + tableType = other769.tableType; + comments = other769.comments; + __isset = other769.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other760) { - dbName = other760.dbName; - tableName = other760.tableName; - tableType = other760.tableType; - comments = other760.comments; - __isset = other760.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other770) { + dbName = other770.dbName; + tableName = other770.tableName; + tableType = other770.tableType; + comments = other770.comments; + __isset = other770.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { @@ -18594,13 +18879,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other761) : TException() { - message = other761.message; - __isset = other761.__isset; +MetaException::MetaException(const MetaException& other771) : TException() { + message = other771.message; + __isset = other771.__isset; } -MetaException& MetaException::operator=(const MetaException& other762) { - message = other762.message; - __isset = other762.__isset; +MetaException& MetaException::operator=(const MetaException& other772) { + message = other772.message; + __isset = other772.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -18691,13 +18976,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other763) : TException() { - message = other763.message; - __isset = other763.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other773) : TException() { + message = other773.message; + __isset = other773.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other764) { - message = other764.message; - __isset = other764.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other774) { + message = other774.message; + __isset = other774.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -18788,13 +19073,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other765) : TException() { - message = other765.message; - __isset = other765.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other775) : TException() { + message = other775.message; + 
__isset = other775.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other766) { - message = other766.message; - __isset = other766.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other776) { + message = other776.message; + __isset = other776.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -18885,13 +19170,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other767) : TException() { - message = other767.message; - __isset = other767.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other777) : TException() { + message = other777.message; + __isset = other777.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other768) { - message = other768.message; - __isset = other768.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other778) { + message = other778.message; + __isset = other778.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -18982,13 +19267,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other769) : TException() { - message = other769.message; - __isset = other769.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other779) : TException() { + message = other779.message; + __isset = other779.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other770) { - message = other770.message; - __isset = other770.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other780) { + message = other780.message; + __isset = other780.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -19079,13 +19364,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other771) : TException() { - message = other771.message; - __isset = other771.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other781) : TException() { + message = other781.message; + __isset = other781.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other772) { - message = other772.message; - __isset = other772.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other782) { + message = other782.message; + __isset = other782.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -19176,13 +19461,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other773) : TException() { - message = other773.message; - __isset = other773.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other783) : TException() { + message = other783.message; + __isset = other783.__isset; } -InvalidObjectException& 
InvalidObjectException::operator=(const InvalidObjectException& other774) { - message = other774.message; - __isset = other774.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other784) { + message = other784.message; + __isset = other784.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -19273,13 +19558,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other775) : TException() { - message = other775.message; - __isset = other775.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other785) : TException() { + message = other785.message; + __isset = other785.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other776) { - message = other776.message; - __isset = other776.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other786) { + message = other786.message; + __isset = other786.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -19370,13 +19655,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other777) : TException() { - message = other777.message; - __isset = other777.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other787) : TException() { + message = other787.message; + __isset = other787.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other778) { - message = other778.message; - __isset = other778.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other788) { + message = other788.message; + __isset = other788.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -19467,13 +19752,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other779) : TException() { - message = other779.message; - __isset = other779.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other789) : TException() { + message = other789.message; + __isset = other789.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other780) { - message = other780.message; - __isset = other780.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other790) { + message = other790.message; + __isset = other790.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -19564,13 +19849,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other781) : TException() { - message = other781.message; - __isset = other781.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other791) : TException() { + message = other791.message; + __isset = other791.__isset; } 
-ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other782) { - message = other782.message; - __isset = other782.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other792) { + message = other792.message; + __isset = other792.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -19661,13 +19946,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other783) : TException() { - message = other783.message; - __isset = other783.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other793) : TException() { + message = other793.message; + __isset = other793.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other784) { - message = other784.message; - __isset = other784.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other794) { + message = other794.message; + __isset = other794.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -19758,13 +20043,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other785) : TException() { - message = other785.message; - __isset = other785.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other795) : TException() { + message = other795.message; + __isset = other795.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other786) { - message = other786.message; - __isset = other786.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other796) { + message = other796.message; + __isset = other796.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -19855,13 +20140,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other787) : TException() { - message = other787.message; - __isset = other787.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other797) : TException() { + message = other797.message; + __isset = other797.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other788) { - message = other788.message; - __isset = other788.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other798) { + message = other798.message; + __isset = other798.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -19952,13 +20237,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other789) : TException() { - message = other789.message; - __isset = other789.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other799) : TException() { + message = other799.message; + __isset = other799.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other790) { - message = other790.message; - __isset = other790.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other800) { + message = other800.message; + __isset = 
other800.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -20049,13 +20334,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other791) : TException() { - message = other791.message; - __isset = other791.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other801) : TException() { + message = other801.message; + __isset = other801.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other792) { - message = other792.message; - __isset = other792.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other802) { + message = other802.message; + __isset = other802.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index beddd4cb3eec..b510dc9d0ae7 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -396,6 +396,10 @@ class HeartbeatWriteIdRequest; class HeartbeatWriteIdResult; +class GetValidWriteIdsRequest; + +class GetValidWriteIdsResult; + class GetAllFunctionsResponse; class TableMeta; @@ -7445,6 +7449,117 @@ inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdResult& return out; } + +class GetValidWriteIdsRequest { + public: + + GetValidWriteIdsRequest(const GetValidWriteIdsRequest&); + GetValidWriteIdsRequest& operator=(const GetValidWriteIdsRequest&); + GetValidWriteIdsRequest() : dbName(), tblName() { + } + + virtual ~GetValidWriteIdsRequest() throw(); + std::string dbName; + std::string tblName; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + bool operator == (const GetValidWriteIdsRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + return true; + } + bool operator != (const GetValidWriteIdsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetValidWriteIdsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsRequest& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _GetValidWriteIdsResult__isset { + _GetValidWriteIdsResult__isset() : areIdsValid(false), ids(false) {} + bool areIdsValid :1; + bool ids :1; +} _GetValidWriteIdsResult__isset; + +class GetValidWriteIdsResult { + public: + + GetValidWriteIdsResult(const GetValidWriteIdsResult&); + GetValidWriteIdsResult& operator=(const GetValidWriteIdsResult&); + GetValidWriteIdsResult() : lowWatermarkId(0), highWatermarkId(0), areIdsValid(0) { + } + + virtual ~GetValidWriteIdsResult() throw(); + int64_t lowWatermarkId; + int64_t highWatermarkId; + bool areIdsValid; + std::vector ids; + + _GetValidWriteIdsResult__isset __isset; + + void __set_lowWatermarkId(const int64_t val); + + void __set_highWatermarkId(const int64_t val); + + void __set_areIdsValid(const bool val); + + void __set_ids(const std::vector & val); + + bool operator == (const GetValidWriteIdsResult & rhs) const + 
{ + if (!(lowWatermarkId == rhs.lowWatermarkId)) + return false; + if (!(highWatermarkId == rhs.highWatermarkId)) + return false; + if (__isset.areIdsValid != rhs.__isset.areIdsValid) + return false; + else if (__isset.areIdsValid && !(areIdsValid == rhs.areIdsValid)) + return false; + if (__isset.ids != rhs.__isset.ids) + return false; + else if (__isset.ids && !(ids == rhs.ids)) + return false; + return true; + } + bool operator != (const GetValidWriteIdsResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetValidWriteIdsResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b); + +inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsResult& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _GetAllFunctionsResponse__isset { _GetAllFunctionsResponse__isset() : functions(false) {} bool functions :1; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index f427a3a393f9..49a1be25bf32 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); - struct.functions = new ArrayList(_list634.size); - Function _elem635; - for (int _i636 = 0; _i636 < _list634.size; ++_i636) + org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); + struct.functions = new ArrayList(_list642.size); + Function _elem643; + for (int _i644 = 0; _i644 < _list642.size; ++_i644) { - _elem635 = new Function(); - _elem635.read(iprot); - struct.functions.add(_elem635); + _elem643 = new Function(); + _elem643.read(iprot); + struct.functions.add(_elem643); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter637 : struct.functions) + for (Function _iter645 : struct.functions) { - _iter637.write(oprot); + _iter645.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter638 : struct.functions) + for (Function _iter646 : struct.functions) { - _iter638.write(oprot); + _iter646.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list639.size); - Function _elem640; - for (int _i641 = 0; _i641 < _list639.size; ++_i641) + 
org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.functions = new ArrayList<Function>(_list647.size);
+          Function _elem648;
+          for (int _i649 = 0; _i649 < _list647.size; ++_i649)
           {
-            _elem640 = new Function();
-            _elem640.read(iprot);
-            struct.functions.add(_elem640);
+            _elem648 = new Function();
+            _elem648.read(iprot);
+            struct.functions.add(_elem648);
           }
         }
         struct.setFunctionsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
new file mode 100644
index 000000000000..90f103a4377d
--- /dev/null
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
@@ -0,0 +1,490 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class GetValidWriteIdsRequest implements org.apache.thrift.TBase<GetValidWriteIdsRequest, GetValidWriteIdsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetValidWriteIdsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetValidWriteIdsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsRequest.class, metaDataMap); + } + + public GetValidWriteIdsRequest() { + } + + public GetValidWriteIdsRequest( + String dbName, + String tblName) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetValidWriteIdsRequest(GetValidWriteIdsRequest other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + } + + public GetValidWriteIdsRequest deepCopy() { + return new GetValidWriteIdsRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetValidWriteIdsRequest) + return this.equals((GetValidWriteIdsRequest)that); + return false; + } + + public boolean equals(GetValidWriteIdsRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + return list.hashCode(); + } + + @Override + public int compareTo(GetValidWriteIdsRequest other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetValidWriteIdsRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetValidWriteIdsRequestStandardSchemeFactory implements SchemeFactory { + public GetValidWriteIdsRequestStandardScheme getScheme() { + return new GetValidWriteIdsRequestStandardScheme(); + } + } + + private static class GetValidWriteIdsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetValidWriteIdsRequestTupleSchemeFactory implements SchemeFactory { + public GetValidWriteIdsRequestTupleScheme getScheme() { + return new GetValidWriteIdsRequestTupleScheme(); + } + } + + private static class GetValidWriteIdsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } + } + +} + diff --git 
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java
new file mode 100644
index 000000000000..a51f321fa2a3
--- /dev/null
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java
@@ -0,0 +1,740 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class GetValidWriteIdsResult implements org.apache.thrift.TBase<GetValidWriteIdsResult, GetValidWriteIdsResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsResult");
+
+  private static final org.apache.thrift.protocol.TField LOW_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("lowWatermarkId", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField HIGH_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highWatermarkId", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField ARE_IDS_VALID_FIELD_DESC = new org.apache.thrift.protocol.TField("areIdsValid", org.apache.thrift.protocol.TType.BOOL, (short)3);
+  private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.LIST, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetValidWriteIdsResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetValidWriteIdsResultTupleSchemeFactory());
+  }
+
+  private long lowWatermarkId; // required
+  private long highWatermarkId; // required
+  private boolean areIdsValid; // optional
+  private List<Long> ids; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LOW_WATERMARK_ID((short)1, "lowWatermarkId"), + HIGH_WATERMARK_ID((short)2, "highWatermarkId"), + ARE_IDS_VALID((short)3, "areIdsValid"), + IDS((short)4, "ids"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // LOW_WATERMARK_ID + return LOW_WATERMARK_ID; + case 2: // HIGH_WATERMARK_ID + return HIGH_WATERMARK_ID; + case 3: // ARE_IDS_VALID + return ARE_IDS_VALID; + case 4: // IDS + return IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __LOWWATERMARKID_ISSET_ID = 0; + private static final int __HIGHWATERMARKID_ISSET_ID = 1; + private static final int __AREIDSVALID_ISSET_ID = 2; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.ARE_IDS_VALID,_Fields.IDS}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LOW_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("lowWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.HIGH_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("highWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ARE_IDS_VALID, new org.apache.thrift.meta_data.FieldMetaData("areIdsValid", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsResult.class, metaDataMap); + } + + public GetValidWriteIdsResult() { + } + + public GetValidWriteIdsResult( + long lowWatermarkId, + long highWatermarkId) + { + this(); + this.lowWatermarkId = 
lowWatermarkId; + setLowWatermarkIdIsSet(true); + this.highWatermarkId = highWatermarkId; + setHighWatermarkIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetValidWriteIdsResult(GetValidWriteIdsResult other) { + __isset_bitfield = other.__isset_bitfield; + this.lowWatermarkId = other.lowWatermarkId; + this.highWatermarkId = other.highWatermarkId; + this.areIdsValid = other.areIdsValid; + if (other.isSetIds()) { + List __this__ids = new ArrayList(other.ids); + this.ids = __this__ids; + } + } + + public GetValidWriteIdsResult deepCopy() { + return new GetValidWriteIdsResult(this); + } + + @Override + public void clear() { + setLowWatermarkIdIsSet(false); + this.lowWatermarkId = 0; + setHighWatermarkIdIsSet(false); + this.highWatermarkId = 0; + setAreIdsValidIsSet(false); + this.areIdsValid = false; + this.ids = null; + } + + public long getLowWatermarkId() { + return this.lowWatermarkId; + } + + public void setLowWatermarkId(long lowWatermarkId) { + this.lowWatermarkId = lowWatermarkId; + setLowWatermarkIdIsSet(true); + } + + public void unsetLowWatermarkId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID); + } + + /** Returns true if field lowWatermarkId is set (has been assigned a value) and false otherwise */ + public boolean isSetLowWatermarkId() { + return EncodingUtils.testBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID); + } + + public void setLowWatermarkIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID, value); + } + + public long getHighWatermarkId() { + return this.highWatermarkId; + } + + public void setHighWatermarkId(long highWatermarkId) { + this.highWatermarkId = highWatermarkId; + setHighWatermarkIdIsSet(true); + } + + public void unsetHighWatermarkId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID); + } + + /** Returns true if field highWatermarkId is set (has been assigned a value) and false otherwise */ + public boolean isSetHighWatermarkId() { + return EncodingUtils.testBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID); + } + + public void setHighWatermarkIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID, value); + } + + public boolean isAreIdsValid() { + return this.areIdsValid; + } + + public void setAreIdsValid(boolean areIdsValid) { + this.areIdsValid = areIdsValid; + setAreIdsValidIsSet(true); + } + + public void unsetAreIdsValid() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AREIDSVALID_ISSET_ID); + } + + /** Returns true if field areIdsValid is set (has been assigned a value) and false otherwise */ + public boolean isSetAreIdsValid() { + return EncodingUtils.testBit(__isset_bitfield, __AREIDSVALID_ISSET_ID); + } + + public void setAreIdsValidIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AREIDSVALID_ISSET_ID, value); + } + + public int getIdsSize() { + return (this.ids == null) ? 0 : this.ids.size(); + } + + public java.util.Iterator getIdsIterator() { + return (this.ids == null) ? 
null : this.ids.iterator(); + } + + public void addToIds(long elem) { + if (this.ids == null) { + this.ids = new ArrayList(); + } + this.ids.add(elem); + } + + public List getIds() { + return this.ids; + } + + public void setIds(List ids) { + this.ids = ids; + } + + public void unsetIds() { + this.ids = null; + } + + /** Returns true if field ids is set (has been assigned a value) and false otherwise */ + public boolean isSetIds() { + return this.ids != null; + } + + public void setIdsIsSet(boolean value) { + if (!value) { + this.ids = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOW_WATERMARK_ID: + if (value == null) { + unsetLowWatermarkId(); + } else { + setLowWatermarkId((Long)value); + } + break; + + case HIGH_WATERMARK_ID: + if (value == null) { + unsetHighWatermarkId(); + } else { + setHighWatermarkId((Long)value); + } + break; + + case ARE_IDS_VALID: + if (value == null) { + unsetAreIdsValid(); + } else { + setAreIdsValid((Boolean)value); + } + break; + + case IDS: + if (value == null) { + unsetIds(); + } else { + setIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LOW_WATERMARK_ID: + return getLowWatermarkId(); + + case HIGH_WATERMARK_ID: + return getHighWatermarkId(); + + case ARE_IDS_VALID: + return isAreIdsValid(); + + case IDS: + return getIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOW_WATERMARK_ID: + return isSetLowWatermarkId(); + case HIGH_WATERMARK_ID: + return isSetHighWatermarkId(); + case ARE_IDS_VALID: + return isSetAreIdsValid(); + case IDS: + return isSetIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetValidWriteIdsResult) + return this.equals((GetValidWriteIdsResult)that); + return false; + } + + public boolean equals(GetValidWriteIdsResult that) { + if (that == null) + return false; + + boolean this_present_lowWatermarkId = true; + boolean that_present_lowWatermarkId = true; + if (this_present_lowWatermarkId || that_present_lowWatermarkId) { + if (!(this_present_lowWatermarkId && that_present_lowWatermarkId)) + return false; + if (this.lowWatermarkId != that.lowWatermarkId) + return false; + } + + boolean this_present_highWatermarkId = true; + boolean that_present_highWatermarkId = true; + if (this_present_highWatermarkId || that_present_highWatermarkId) { + if (!(this_present_highWatermarkId && that_present_highWatermarkId)) + return false; + if (this.highWatermarkId != that.highWatermarkId) + return false; + } + + boolean this_present_areIdsValid = true && this.isSetAreIdsValid(); + boolean that_present_areIdsValid = true && that.isSetAreIdsValid(); + if (this_present_areIdsValid || that_present_areIdsValid) { + if (!(this_present_areIdsValid && that_present_areIdsValid)) + return false; + if (this.areIdsValid != that.areIdsValid) + return false; + } + + boolean this_present_ids = true && this.isSetIds(); + boolean that_present_ids = true && that.isSetIds(); + if (this_present_ids || that_present_ids) { + if (!(this_present_ids && that_present_ids)) + return false; + if (!this.ids.equals(that.ids)) + return false; + } + + return true; + } + + @Override + public int 
hashCode() { + List list = new ArrayList(); + + boolean present_lowWatermarkId = true; + list.add(present_lowWatermarkId); + if (present_lowWatermarkId) + list.add(lowWatermarkId); + + boolean present_highWatermarkId = true; + list.add(present_highWatermarkId); + if (present_highWatermarkId) + list.add(highWatermarkId); + + boolean present_areIdsValid = true && (isSetAreIdsValid()); + list.add(present_areIdsValid); + if (present_areIdsValid) + list.add(areIdsValid); + + boolean present_ids = true && (isSetIds()); + list.add(present_ids); + if (present_ids) + list.add(ids); + + return list.hashCode(); + } + + @Override + public int compareTo(GetValidWriteIdsResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetLowWatermarkId()).compareTo(other.isSetLowWatermarkId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLowWatermarkId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowWatermarkId, other.lowWatermarkId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetHighWatermarkId()).compareTo(other.isSetHighWatermarkId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHighWatermarkId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highWatermarkId, other.highWatermarkId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAreIdsValid()).compareTo(other.isSetAreIdsValid()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAreIdsValid()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.areIdsValid, other.areIdsValid); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetValidWriteIdsResult("); + boolean first = true; + + sb.append("lowWatermarkId:"); + sb.append(this.lowWatermarkId); + first = false; + if (!first) sb.append(", "); + sb.append("highWatermarkId:"); + sb.append(this.highWatermarkId); + first = false; + if (isSetAreIdsValid()) { + if (!first) sb.append(", "); + sb.append("areIdsValid:"); + sb.append(this.areIdsValid); + first = false; + } + if (isSetIds()) { + if (!first) sb.append(", "); + sb.append("ids:"); + if (this.ids == null) { + sb.append("null"); + } else { + sb.append(this.ids); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetLowWatermarkId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'lowWatermarkId' is unset! 
Struct:" + toString()); + } + + if (!isSetHighWatermarkId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'highWatermarkId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetValidWriteIdsResultStandardSchemeFactory implements SchemeFactory { + public GetValidWriteIdsResultStandardScheme getScheme() { + return new GetValidWriteIdsResultStandardScheme(); + } + } + + private static class GetValidWriteIdsResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LOW_WATERMARK_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.lowWatermarkId = iprot.readI64(); + struct.setLowWatermarkIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // HIGH_WATERMARK_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.highWatermarkId = iprot.readI64(); + struct.setHighWatermarkIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // ARE_IDS_VALID + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.areIdsValid = iprot.readBool(); + struct.setAreIdsValidIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); + struct.ids = new ArrayList(_list634.size); + long _elem635; + for (int _i636 = 0; _i636 < _list634.size; ++_i636) + { + _elem635 = iprot.readI64(); + struct.ids.add(_elem635); + } + iprot.readListEnd(); + } + struct.setIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(LOW_WATERMARK_ID_FIELD_DESC); + oprot.writeI64(struct.lowWatermarkId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(HIGH_WATERMARK_ID_FIELD_DESC); + 
oprot.writeI64(struct.highWatermarkId); + oprot.writeFieldEnd(); + if (struct.isSetAreIdsValid()) { + oprot.writeFieldBegin(ARE_IDS_VALID_FIELD_DESC); + oprot.writeBool(struct.areIdsValid); + oprot.writeFieldEnd(); + } + if (struct.ids != null) { + if (struct.isSetIds()) { + oprot.writeFieldBegin(IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.ids.size())); + for (long _iter637 : struct.ids) + { + oprot.writeI64(_iter637); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetValidWriteIdsResultTupleSchemeFactory implements SchemeFactory { + public GetValidWriteIdsResultTupleScheme getScheme() { + return new GetValidWriteIdsResultTupleScheme(); + } + } + + private static class GetValidWriteIdsResultTupleScheme extends TupleScheme<GetValidWriteIdsResult> { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.lowWatermarkId); + oprot.writeI64(struct.highWatermarkId); + BitSet optionals = new BitSet(); + if (struct.isSetAreIdsValid()) { + optionals.set(0); + } + if (struct.isSetIds()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetAreIdsValid()) { + oprot.writeBool(struct.areIdsValid); + } + if (struct.isSetIds()) { + { + oprot.writeI32(struct.ids.size()); + for (long _iter638 : struct.ids) + { + oprot.writeI64(_iter638); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.lowWatermarkId = iprot.readI64(); + struct.setLowWatermarkIdIsSet(true); + struct.highWatermarkId = iprot.readI64(); + struct.setHighWatermarkIdIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.areIdsValid = iprot.readBool(); + struct.setAreIdsValidIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.ids = new ArrayList<Long>(_list639.size); + long _elem640; + for (int _i641 = 0; _i641 < _list639.size; ++_i641) + { + _elem640 = iprot.readI64(); + struct.ids.add(_elem640); + } + } + struct.setIdsIsSet(true); + } + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 40907b39600e..4134483ac45a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -346,6 +346,8 @@ public interface Iface extends com.facebook.fb303.FacebookService.Iface { public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException; + public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException; + } public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -654,6 +656,8 @@ public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIfa public void heartbeat_write_id(HeartbeatWriteIdRequest
req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -5044,6 +5048,29 @@ public HeartbeatWriteIdResult recv_heartbeat_write_id() throws org.apache.thrift throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result"); } + public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException + { + send_get_valid_write_ids(req); + return recv_get_valid_write_ids(); + } + + public void send_get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException + { + get_valid_write_ids_args args = new get_valid_write_ids_args(); + args.setReq(req); + sendBase("get_valid_write_ids", args); + } + + public GetValidWriteIdsResult recv_get_valid_write_ids() throws org.apache.thrift.TException + { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + receiveBase(result, "get_valid_write_ids"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result"); + } + } public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -10409,6 +10436,38 @@ public HeartbeatWriteIdResult getResult() throws org.apache.thrift.TException { } } + public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_valid_write_ids_call method_call = new get_valid_write_ids_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_valid_write_ids_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetValidWriteIdsRequest req; + public get_valid_write_ids_call(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_valid_write_ids", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_valid_write_ids_args args = new get_valid_write_ids_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetValidWriteIdsResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = 
client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_valid_write_ids(); + } + } + } public static class Processor<I extends Iface> extends com.facebook.fb303.FacebookService.Processor<I> implements org.apache.thrift.TProcessor { @@ -10574,6 +10633,7 @@ protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) { + public static class get_valid_write_ids<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_valid_write_ids_args> { + public get_valid_write_ids() { + super("get_valid_write_ids"); + } + + public get_valid_write_ids_args getEmptyArgsInstance() { + return new get_valid_write_ids_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_valid_write_ids_result getResult(I iface, get_valid_write_ids_args args) throws org.apache.thrift.TException { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + result.success = iface.get_valid_write_ids(args.req); + return result; + } + } + } public static class AsyncProcessor<I extends AsyncIface> extends com.facebook.fb303.FacebookService.AsyncProcessor<I> { @@ -14608,6 +14688,7 @@ protected AsyncProcessor(I iface, Map<String, org.apache.thrift.AsyncProcessFunction<I, ? extends org.apache.thrift.TBase, ?>> processMap) { + public static class get_valid_write_ids<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_valid_write_ids_args, GetValidWriteIdsResult> { + public get_valid_write_ids() { + super("get_valid_write_ids"); + } + + public get_valid_write_ids_args getEmptyArgsInstance() { + return new get_valid_write_ids_args(); + } + + public AsyncMethodCallback<GetValidWriteIdsResult> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<GetValidWriteIdsResult>() { + public void onComplete(GetValidWriteIdsResult o) { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_valid_write_ids_result result = new get_valid_write_ids_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_valid_write_ids_args args, org.apache.thrift.async.AsyncMethodCallback<GetValidWriteIdsResult> resultHandler) throws TException { + iface.get_valid_write_ids(args.req,resultHandler); + } + } + } public static class getMetaConf_args implements org.apache.thrift.TBase<getMetaConf_args, getMetaConf_args._Fields>, java.io.Serializable, Cloneable, Comparable<getMetaConf_args> { @@ -29238,13 +29370,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); - struct.success = new ArrayList<String>(_list642.size); - String _elem643; - for (int _i644 = 0; _i644 < _list642.size; ++_i644) + org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); + struct.success = new ArrayList<String>(_list650.size); + String _elem651; + for (int _i652 = 0; _i652 < _list650.size; ++_i652) { - _elem643 = iprot.readString(); - struct.success.add(_elem643); + _elem651 = iprot.readString(); + struct.success.add(_elem651); } iprot.readListEnd(); } @@ -29279,9 +29411,9 @@
public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter645 : struct.success) + for (String _iter653 : struct.success) { - oprot.writeString(_iter645); + oprot.writeString(_iter653); } oprot.writeListEnd(); } @@ -29320,9 +29452,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter646 : struct.success) + for (String _iter654 : struct.success) { - oprot.writeString(_iter646); + oprot.writeString(_iter654); } } } @@ -29337,13 +29469,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list647.size); - String _elem648; - for (int _i649 = 0; _i649 < _list647.size; ++_i649) + org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list655.size); + String _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem648 = iprot.readString(); - struct.success.add(_elem648); + _elem656 = iprot.readString(); + struct.success.add(_elem656); } } struct.setSuccessIsSet(true); @@ -29997,13 +30129,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); - struct.success = new ArrayList(_list650.size); - String _elem651; - for (int _i652 = 0; _i652 < _list650.size; ++_i652) + org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); + struct.success = new ArrayList(_list658.size); + String _elem659; + for (int _i660 = 0; _i660 < _list658.size; ++_i660) { - _elem651 = iprot.readString(); - struct.success.add(_elem651); + _elem659 = iprot.readString(); + struct.success.add(_elem659); } iprot.readListEnd(); } @@ -30038,9 +30170,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter653 : struct.success) + for (String _iter661 : struct.success) { - oprot.writeString(_iter653); + oprot.writeString(_iter661); } oprot.writeListEnd(); } @@ -30079,9 +30211,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter654 : struct.success) + for (String _iter662 : struct.success) { - oprot.writeString(_iter654); + oprot.writeString(_iter662); } } } @@ -30096,13 +30228,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list655.size); - String _elem656; - for (int _i657 = 0; _i657 < _list655.size; ++_i657) 
+ org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list663.size); + String _elem664; + for (int _i665 = 0; _i665 < _list663.size; ++_i665) { - _elem656 = iprot.readString(); - struct.success.add(_elem656); + _elem664 = iprot.readString(); + struct.success.add(_elem664); } } struct.setSuccessIsSet(true); @@ -34709,16 +34841,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map658 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map658.size); - String _key659; - Type _val660; - for (int _i661 = 0; _i661 < _map658.size; ++_i661) + org.apache.thrift.protocol.TMap _map666 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map666.size); + String _key667; + Type _val668; + for (int _i669 = 0; _i669 < _map666.size; ++_i669) { - _key659 = iprot.readString(); - _val660 = new Type(); - _val660.read(iprot); - struct.success.put(_key659, _val660); + _key667 = iprot.readString(); + _val668 = new Type(); + _val668.read(iprot); + struct.success.put(_key667, _val668); } iprot.readMapEnd(); } @@ -34753,10 +34885,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter662 : struct.success.entrySet()) + for (Map.Entry _iter670 : struct.success.entrySet()) { - oprot.writeString(_iter662.getKey()); - _iter662.getValue().write(oprot); + oprot.writeString(_iter670.getKey()); + _iter670.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -34795,10 +34927,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter663 : struct.success.entrySet()) + for (Map.Entry _iter671 : struct.success.entrySet()) { - oprot.writeString(_iter663.getKey()); - _iter663.getValue().write(oprot); + oprot.writeString(_iter671.getKey()); + _iter671.getValue().write(oprot); } } } @@ -34813,16 +34945,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map664 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map664.size); - String _key665; - Type _val666; - for (int _i667 = 0; _i667 < _map664.size; ++_i667) + org.apache.thrift.protocol.TMap _map672 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map672.size); + String _key673; + Type _val674; + for (int _i675 = 0; _i675 < _map672.size; ++_i675) { - _key665 = iprot.readString(); - _val666 = new Type(); - _val666.read(iprot); - struct.success.put(_key665, _val666); + _key673 = iprot.readString(); + _val674 = new Type(); + _val674.read(iprot); + struct.success.put(_key673, _val674); } } struct.setSuccessIsSet(true); @@ -35857,14 +35989,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list668 = iprot.readListBegin(); - struct.success = new ArrayList(_list668.size); - FieldSchema _elem669; - for (int _i670 = 0; _i670 < _list668.size; ++_i670) + org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); + struct.success = new ArrayList(_list676.size); + FieldSchema _elem677; + for (int _i678 = 0; _i678 < _list676.size; ++_i678) { - _elem669 = new FieldSchema(); - _elem669.read(iprot); - struct.success.add(_elem669); + _elem677 = new FieldSchema(); + _elem677.read(iprot); + struct.success.add(_elem677); } iprot.readListEnd(); } @@ -35917,9 +36049,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter671 : struct.success) + for (FieldSchema _iter679 : struct.success) { - _iter671.write(oprot); + _iter679.write(oprot); } oprot.writeListEnd(); } @@ -35974,9 +36106,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter672 : struct.success) + for (FieldSchema _iter680 : struct.success) { - _iter672.write(oprot); + _iter680.write(oprot); } } } @@ -35997,14 +36129,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list673.size); - FieldSchema _elem674; - for (int _i675 = 0; _i675 < _list673.size; ++_i675) + org.apache.thrift.protocol.TList _list681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list681.size); + FieldSchema _elem682; + for (int _i683 = 0; _i683 < _list681.size; ++_i683) { - _elem674 = new FieldSchema(); - _elem674.read(iprot); - struct.success.add(_elem674); + _elem682 = new FieldSchema(); + _elem682.read(iprot); + struct.success.add(_elem682); } } struct.setSuccessIsSet(true); @@ -37158,14 +37290,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); - struct.success = new ArrayList(_list676.size); - FieldSchema _elem677; - for (int _i678 = 0; _i678 < _list676.size; ++_i678) + org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); + struct.success = new ArrayList(_list684.size); + FieldSchema _elem685; + for (int _i686 = 0; _i686 < _list684.size; ++_i686) { - _elem677 = new FieldSchema(); - _elem677.read(iprot); - struct.success.add(_elem677); + _elem685 = new FieldSchema(); + _elem685.read(iprot); + struct.success.add(_elem685); } iprot.readListEnd(); } @@ -37218,9 +37350,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter679 : struct.success) + for (FieldSchema _iter687 : struct.success) { - _iter679.write(oprot); + _iter687.write(oprot); 
} oprot.writeListEnd(); } @@ -37275,9 +37407,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter680 : struct.success) + for (FieldSchema _iter688 : struct.success) { - _iter680.write(oprot); + _iter688.write(oprot); } } } @@ -37298,14 +37430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list681.size); - FieldSchema _elem682; - for (int _i683 = 0; _i683 < _list681.size; ++_i683) + org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list689.size); + FieldSchema _elem690; + for (int _i691 = 0; _i691 < _list689.size; ++_i691) { - _elem682 = new FieldSchema(); - _elem682.read(iprot); - struct.success.add(_elem682); + _elem690 = new FieldSchema(); + _elem690.read(iprot); + struct.success.add(_elem690); } } struct.setSuccessIsSet(true); @@ -38350,14 +38482,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); - struct.success = new ArrayList(_list684.size); - FieldSchema _elem685; - for (int _i686 = 0; _i686 < _list684.size; ++_i686) + org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); + struct.success = new ArrayList(_list692.size); + FieldSchema _elem693; + for (int _i694 = 0; _i694 < _list692.size; ++_i694) { - _elem685 = new FieldSchema(); - _elem685.read(iprot); - struct.success.add(_elem685); + _elem693 = new FieldSchema(); + _elem693.read(iprot); + struct.success.add(_elem693); } iprot.readListEnd(); } @@ -38410,9 +38542,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter687 : struct.success) + for (FieldSchema _iter695 : struct.success) { - _iter687.write(oprot); + _iter695.write(oprot); } oprot.writeListEnd(); } @@ -38467,9 +38599,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter688 : struct.success) + for (FieldSchema _iter696 : struct.success) { - _iter688.write(oprot); + _iter696.write(oprot); } } } @@ -38490,14 +38622,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list689.size); - FieldSchema _elem690; - for (int _i691 = 0; _i691 < _list689.size; ++_i691) + org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list697.size); + FieldSchema _elem698; + for (int _i699 = 0; _i699 < _list697.size; ++_i699) { - _elem690 = 
new FieldSchema(); - _elem690.read(iprot); - struct.success.add(_elem690); + _elem698 = new FieldSchema(); + _elem698.read(iprot); + struct.success.add(_elem698); } } struct.setSuccessIsSet(true); @@ -39651,14 +39783,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); - struct.success = new ArrayList(_list692.size); - FieldSchema _elem693; - for (int _i694 = 0; _i694 < _list692.size; ++_i694) + org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); + struct.success = new ArrayList(_list700.size); + FieldSchema _elem701; + for (int _i702 = 0; _i702 < _list700.size; ++_i702) { - _elem693 = new FieldSchema(); - _elem693.read(iprot); - struct.success.add(_elem693); + _elem701 = new FieldSchema(); + _elem701.read(iprot); + struct.success.add(_elem701); } iprot.readListEnd(); } @@ -39711,9 +39843,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter695 : struct.success) + for (FieldSchema _iter703 : struct.success) { - _iter695.write(oprot); + _iter703.write(oprot); } oprot.writeListEnd(); } @@ -39768,9 +39900,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter696 : struct.success) + for (FieldSchema _iter704 : struct.success) { - _iter696.write(oprot); + _iter704.write(oprot); } } } @@ -39791,14 +39923,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list697.size); - FieldSchema _elem698; - for (int _i699 = 0; _i699 < _list697.size; ++_i699) + org.apache.thrift.protocol.TList _list705 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list705.size); + FieldSchema _elem706; + for (int _i707 = 0; _i707 < _list705.size; ++_i707) { - _elem698 = new FieldSchema(); - _elem698.read(iprot); - struct.success.add(_elem698); + _elem706 = new FieldSchema(); + _elem706.read(iprot); + struct.success.add(_elem706); } } struct.setSuccessIsSet(true); @@ -42523,14 +42655,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list700.size); - SQLPrimaryKey _elem701; - for (int _i702 = 0; _i702 < _list700.size; ++_i702) + org.apache.thrift.protocol.TList _list708 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list708.size); + SQLPrimaryKey _elem709; + for (int _i710 = 0; _i710 < _list708.size; ++_i710) { - _elem701 = new SQLPrimaryKey(); - _elem701.read(iprot); - struct.primaryKeys.add(_elem701); + _elem709 = new SQLPrimaryKey(); + _elem709.read(iprot); + struct.primaryKeys.add(_elem709); } iprot.readListEnd(); } @@ -42542,14 +42674,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list703 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list703.size); - SQLForeignKey _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list711 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list711.size); + SQLForeignKey _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem704 = new SQLForeignKey(); - _elem704.read(iprot); - struct.foreignKeys.add(_elem704); + _elem712 = new SQLForeignKey(); + _elem712.read(iprot); + struct.foreignKeys.add(_elem712); } iprot.readListEnd(); } @@ -42580,9 +42712,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter706 : struct.primaryKeys) + for (SQLPrimaryKey _iter714 : struct.primaryKeys) { - _iter706.write(oprot); + _iter714.write(oprot); } oprot.writeListEnd(); } @@ -42592,9 +42724,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter707 : struct.foreignKeys) + for (SQLForeignKey _iter715 : struct.foreignKeys) { - _iter707.write(oprot); + _iter715.write(oprot); } oprot.writeListEnd(); } @@ -42634,18 +42766,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter708 : struct.primaryKeys) + for (SQLPrimaryKey _iter716 : struct.primaryKeys) { - _iter708.write(oprot); + _iter716.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter709 : struct.foreignKeys) + for (SQLForeignKey _iter717 : struct.foreignKeys) { - _iter709.write(oprot); + _iter717.write(oprot); } } } @@ -42662,28 +42794,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list710 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list710.size); - SQLPrimaryKey _elem711; - for (int _i712 = 0; _i712 < _list710.size; ++_i712) + org.apache.thrift.protocol.TList _list718 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list718.size); + SQLPrimaryKey _elem719; + for (int _i720 = 0; _i720 < _list718.size; ++_i720) { - _elem711 = new SQLPrimaryKey(); - _elem711.read(iprot); - struct.primaryKeys.add(_elem711); + _elem719 = new SQLPrimaryKey(); + _elem719.read(iprot); + struct.primaryKeys.add(_elem719); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list713.size); - SQLForeignKey _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + org.apache.thrift.protocol.TList _list721 
= new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list721.size); + SQLForeignKey _elem722; + for (int _i723 = 0; _i723 < _list721.size; ++_i723) { - _elem714 = new SQLForeignKey(); - _elem714.read(iprot); - struct.foreignKeys.add(_elem714); + _elem722 = new SQLForeignKey(); + _elem722.read(iprot); + struct.foreignKeys.add(_elem722); } } struct.setForeignKeysIsSet(true); @@ -48882,13 +49014,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.success = new ArrayList(_list716.size); - String _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); + struct.success = new ArrayList(_list724.size); + String _elem725; + for (int _i726 = 0; _i726 < _list724.size; ++_i726) { - _elem717 = iprot.readString(); - struct.success.add(_elem717); + _elem725 = iprot.readString(); + struct.success.add(_elem725); } iprot.readListEnd(); } @@ -48923,9 +49055,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter719 : struct.success) + for (String _iter727 : struct.success) { - oprot.writeString(_iter719); + oprot.writeString(_iter727); } oprot.writeListEnd(); } @@ -48964,9 +49096,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter720 : struct.success) + for (String _iter728 : struct.success) { - oprot.writeString(_iter720); + oprot.writeString(_iter728); } } } @@ -48981,13 +49113,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list721.size); - String _elem722; - for (int _i723 = 0; _i723 < _list721.size; ++_i723) + org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list729.size); + String _elem730; + for (int _i731 = 0; _i731 < _list729.size; ++_i731) { - _elem722 = iprot.readString(); - struct.success.add(_elem722); + _elem730 = iprot.readString(); + struct.success.add(_elem730); } } struct.setSuccessIsSet(true); @@ -49492,13 +49624,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list724.size); - String _elem725; - for (int _i726 = 0; _i726 < _list724.size; ++_i726) + org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list732.size); + String _elem733; + for (int _i734 = 0; _i734 < _list732.size; ++_i734) { - _elem725 = iprot.readString(); - struct.tbl_types.add(_elem725); + _elem733 = iprot.readString(); + struct.tbl_types.add(_elem733); } 
iprot.readListEnd(); } @@ -49534,9 +49666,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter727 : struct.tbl_types) + for (String _iter735 : struct.tbl_types) { - oprot.writeString(_iter727); + oprot.writeString(_iter735); } oprot.writeListEnd(); } @@ -49579,9 +49711,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter728 : struct.tbl_types) + for (String _iter736 : struct.tbl_types) { - oprot.writeString(_iter728); + oprot.writeString(_iter736); } } } @@ -49601,13 +49733,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list729.size); - String _elem730; - for (int _i731 = 0; _i731 < _list729.size; ++_i731) + org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list737.size); + String _elem738; + for (int _i739 = 0; _i739 < _list737.size; ++_i739) { - _elem730 = iprot.readString(); - struct.tbl_types.add(_elem730); + _elem738 = iprot.readString(); + struct.tbl_types.add(_elem738); } } struct.setTbl_typesIsSet(true); @@ -50013,14 +50145,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); - struct.success = new ArrayList(_list732.size); - TableMeta _elem733; - for (int _i734 = 0; _i734 < _list732.size; ++_i734) + org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); + struct.success = new ArrayList(_list740.size); + TableMeta _elem741; + for (int _i742 = 0; _i742 < _list740.size; ++_i742) { - _elem733 = new TableMeta(); - _elem733.read(iprot); - struct.success.add(_elem733); + _elem741 = new TableMeta(); + _elem741.read(iprot); + struct.success.add(_elem741); } iprot.readListEnd(); } @@ -50055,9 +50187,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter735 : struct.success) + for (TableMeta _iter743 : struct.success) { - _iter735.write(oprot); + _iter743.write(oprot); } oprot.writeListEnd(); } @@ -50096,9 +50228,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter736 : struct.success) + for (TableMeta _iter744 : struct.success) { - _iter736.write(oprot); + _iter744.write(oprot); } } } @@ -50113,14 +50245,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list737.size); - 
TableMeta _elem738; - for (int _i739 = 0; _i739 < _list737.size; ++_i739) + org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list745.size); + TableMeta _elem746; + for (int _i747 = 0; _i747 < _list745.size; ++_i747) { - _elem738 = new TableMeta(); - _elem738.read(iprot); - struct.success.add(_elem738); + _elem746 = new TableMeta(); + _elem746.read(iprot); + struct.success.add(_elem746); } } struct.setSuccessIsSet(true); @@ -50886,13 +51018,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.success = new ArrayList(_list740.size); - String _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); + struct.success = new ArrayList(_list748.size); + String _elem749; + for (int _i750 = 0; _i750 < _list748.size; ++_i750) { - _elem741 = iprot.readString(); - struct.success.add(_elem741); + _elem749 = iprot.readString(); + struct.success.add(_elem749); } iprot.readListEnd(); } @@ -50927,9 +51059,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter743 : struct.success) + for (String _iter751 : struct.success) { - oprot.writeString(_iter743); + oprot.writeString(_iter751); } oprot.writeListEnd(); } @@ -50968,9 +51100,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter744 : struct.success) + for (String _iter752 : struct.success) { - oprot.writeString(_iter744); + oprot.writeString(_iter752); } } } @@ -50985,13 +51117,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list745.size); - String _elem746; - for (int _i747 = 0; _i747 < _list745.size; ++_i747) + org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list753.size); + String _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem746 = iprot.readString(); - struct.success.add(_elem746); + _elem754 = iprot.readString(); + struct.success.add(_elem754); } } struct.setSuccessIsSet(true); @@ -52444,13 +52576,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list748.size); - String _elem749; - for (int _i750 = 0; _i750 < _list748.size; ++_i750) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list756.size); + String _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem749 = iprot.readString(); - 
struct.tbl_names.add(_elem749); + _elem757 = iprot.readString(); + struct.tbl_names.add(_elem757); } iprot.readListEnd(); } @@ -52481,9 +52613,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter751 : struct.tbl_names) + for (String _iter759 : struct.tbl_names) { - oprot.writeString(_iter751); + oprot.writeString(_iter759); } oprot.writeListEnd(); } @@ -52520,9 +52652,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter752 : struct.tbl_names) + for (String _iter760 : struct.tbl_names) { - oprot.writeString(_iter752); + oprot.writeString(_iter760); } } } @@ -52538,13 +52670,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list753.size); - String _elem754; - for (int _i755 = 0; _i755 < _list753.size; ++_i755) + org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list761.size); + String _elem762; + for (int _i763 = 0; _i763 < _list761.size; ++_i763) { - _elem754 = iprot.readString(); - struct.tbl_names.add(_elem754); + _elem762 = iprot.readString(); + struct.tbl_names.add(_elem762); } } struct.setTbl_namesIsSet(true); @@ -53112,14 +53244,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list756.size); - Table _elem757; - for (int _i758 = 0; _i758 < _list756.size; ++_i758) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list764.size); + Table _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem757 = new Table(); - _elem757.read(iprot); - struct.success.add(_elem757); + _elem765 = new Table(); + _elem765.read(iprot); + struct.success.add(_elem765); } iprot.readListEnd(); } @@ -53172,9 +53304,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter759 : struct.success) + for (Table _iter767 : struct.success) { - _iter759.write(oprot); + _iter767.write(oprot); } oprot.writeListEnd(); } @@ -53229,9 +53361,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter760 : struct.success) + for (Table _iter768 : struct.success) { - _iter760.write(oprot); + _iter768.write(oprot); } } } @@ -53252,14 +53384,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list761.size); - Table _elem762; - for (int _i763 = 0; _i763 < _list761.size; ++_i763) + org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list769.size); + Table _elem770; + for (int _i771 = 0; _i771 < _list769.size; ++_i771) { - _elem762 = new Table(); - _elem762.read(iprot); - struct.success.add(_elem762); + _elem770 = new Table(); + _elem770.read(iprot); + struct.success.add(_elem770); } } struct.setSuccessIsSet(true); @@ -54405,13 +54537,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.success = new ArrayList(_list764.size); - String _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.success = new ArrayList(_list772.size); + String _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem765 = iprot.readString(); - struct.success.add(_elem765); + _elem773 = iprot.readString(); + struct.success.add(_elem773); } iprot.readListEnd(); } @@ -54464,9 +54596,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter767 : struct.success) + for (String _iter775 : struct.success) { - oprot.writeString(_iter767); + oprot.writeString(_iter775); } oprot.writeListEnd(); } @@ -54521,9 +54653,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter768 : struct.success) + for (String _iter776 : struct.success) { - oprot.writeString(_iter768); + oprot.writeString(_iter776); } } } @@ -54544,13 +54676,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list769.size); - String _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list777.size); + String _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem770 = iprot.readString(); - struct.success.add(_elem770); + _elem778 = iprot.readString(); + struct.success.add(_elem778); } } struct.setSuccessIsSet(true); @@ -60409,14 +60541,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list772.size); - Partition _elem773; - for (int _i774 = 0; _i774 < _list772.size; ++_i774) + org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list780.size); + Partition _elem781; + for (int _i782 = 0; _i782 < _list780.size; ++_i782) { - _elem773 = new Partition(); - _elem773.read(iprot); - struct.new_parts.add(_elem773); + _elem781 = new Partition(); + _elem781.read(iprot); + struct.new_parts.add(_elem781); } iprot.readListEnd(); } @@ -60442,9 +60574,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg 
oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter775 : struct.new_parts) + for (Partition _iter783 : struct.new_parts) { - _iter775.write(oprot); + _iter783.write(oprot); } oprot.writeListEnd(); } @@ -60475,9 +60607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter776 : struct.new_parts) + for (Partition _iter784 : struct.new_parts) { - _iter776.write(oprot); + _iter784.write(oprot); } } } @@ -60489,14 +60621,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list777.size); - Partition _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list785.size); + Partition _elem786; + for (int _i787 = 0; _i787 < _list785.size; ++_i787) { - _elem778 = new Partition(); - _elem778.read(iprot); - struct.new_parts.add(_elem778); + _elem786 = new Partition(); + _elem786.read(iprot); + struct.new_parts.add(_elem786); } } struct.setNew_partsIsSet(true); @@ -61497,14 +61629,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list780.size); - PartitionSpec _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list788.size); + PartitionSpec _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem781 = new PartitionSpec(); - _elem781.read(iprot); - struct.new_parts.add(_elem781); + _elem789 = new PartitionSpec(); + _elem789.read(iprot); + struct.new_parts.add(_elem789); } iprot.readListEnd(); } @@ -61530,9 +61662,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter783 : struct.new_parts) + for (PartitionSpec _iter791 : struct.new_parts) { - _iter783.write(oprot); + _iter791.write(oprot); } oprot.writeListEnd(); } @@ -61563,9 +61695,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter784 : struct.new_parts) + for (PartitionSpec _iter792 : struct.new_parts) { - _iter784.write(oprot); + _iter792.write(oprot); } } } @@ -61577,14 +61709,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new 
ArrayList(_list785.size); - PartitionSpec _elem786; - for (int _i787 = 0; _i787 < _list785.size; ++_i787) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list793.size); + PartitionSpec _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem786 = new PartitionSpec(); - _elem786.read(iprot); - struct.new_parts.add(_elem786); + _elem794 = new PartitionSpec(); + _elem794.read(iprot); + struct.new_parts.add(_elem794); } } struct.setNew_partsIsSet(true); @@ -62760,13 +62892,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list788.size); - String _elem789; - for (int _i790 = 0; _i790 < _list788.size; ++_i790) + org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list796.size); + String _elem797; + for (int _i798 = 0; _i798 < _list796.size; ++_i798) { - _elem789 = iprot.readString(); - struct.part_vals.add(_elem789); + _elem797 = iprot.readString(); + struct.part_vals.add(_elem797); } iprot.readListEnd(); } @@ -62802,9 +62934,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter791 : struct.part_vals) + for (String _iter799 : struct.part_vals) { - oprot.writeString(_iter791); + oprot.writeString(_iter799); } oprot.writeListEnd(); } @@ -62847,9 +62979,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter792 : struct.part_vals) + for (String _iter800 : struct.part_vals) { - oprot.writeString(_iter792); + oprot.writeString(_iter800); } } } @@ -62869,13 +63001,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list793.size); - String _elem794; - for (int _i795 = 0; _i795 < _list793.size; ++_i795) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list801.size); + String _elem802; + for (int _i803 = 0; _i803 < _list801.size; ++_i803) { - _elem794 = iprot.readString(); - struct.part_vals.add(_elem794); + _elem802 = iprot.readString(); + struct.part_vals.add(_elem802); } } struct.setPart_valsIsSet(true); @@ -65184,13 +65316,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list796.size); - String _elem797; - for (int _i798 = 0; _i798 < _list796.size; ++_i798) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list804.size); + String _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - 
_elem797 = iprot.readString(); - struct.part_vals.add(_elem797); + _elem805 = iprot.readString(); + struct.part_vals.add(_elem805); } iprot.readListEnd(); } @@ -65235,9 +65367,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter799 : struct.part_vals) + for (String _iter807 : struct.part_vals) { - oprot.writeString(_iter799); + oprot.writeString(_iter807); } oprot.writeListEnd(); } @@ -65288,9 +65420,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter800 : struct.part_vals) + for (String _iter808 : struct.part_vals) { - oprot.writeString(_iter800); + oprot.writeString(_iter808); } } } @@ -65313,13 +65445,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list801.size); - String _elem802; - for (int _i803 = 0; _i803 < _list801.size; ++_i803) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list809.size); + String _elem810; + for (int _i811 = 0; _i811 < _list809.size; ++_i811) { - _elem802 = iprot.readString(); - struct.part_vals.add(_elem802); + _elem810 = iprot.readString(); + struct.part_vals.add(_elem810); } } struct.setPart_valsIsSet(true); @@ -69189,13 +69321,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list804.size); - String _elem805; - for (int _i806 = 0; _i806 < _list804.size; ++_i806) + org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list812.size); + String _elem813; + for (int _i814 = 0; _i814 < _list812.size; ++_i814) { - _elem805 = iprot.readString(); - struct.part_vals.add(_elem805); + _elem813 = iprot.readString(); + struct.part_vals.add(_elem813); } iprot.readListEnd(); } @@ -69239,9 +69371,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter807 : struct.part_vals) + for (String _iter815 : struct.part_vals) { - oprot.writeString(_iter807); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -69290,9 +69422,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter808 : struct.part_vals) + for (String _iter816 : struct.part_vals) { - oprot.writeString(_iter808); + oprot.writeString(_iter816); } } } @@ -69315,13 +69447,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list809 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list809.size); - String _elem810; - for (int _i811 = 0; _i811 < _list809.size; ++_i811) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list817.size); + String _elem818; + for (int _i819 = 0; _i819 < _list817.size; ++_i819) { - _elem810 = iprot.readString(); - struct.part_vals.add(_elem810); + _elem818 = iprot.readString(); + struct.part_vals.add(_elem818); } } struct.setPart_valsIsSet(true); @@ -70560,13 +70692,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list812.size); - String _elem813; - for (int _i814 = 0; _i814 < _list812.size; ++_i814) + org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list820.size); + String _elem821; + for (int _i822 = 0; _i822 < _list820.size; ++_i822) { - _elem813 = iprot.readString(); - struct.part_vals.add(_elem813); + _elem821 = iprot.readString(); + struct.part_vals.add(_elem821); } iprot.readListEnd(); } @@ -70619,9 +70751,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter815 : struct.part_vals) + for (String _iter823 : struct.part_vals) { - oprot.writeString(_iter815); + oprot.writeString(_iter823); } oprot.writeListEnd(); } @@ -70678,9 +70810,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter816 : struct.part_vals) + for (String _iter824 : struct.part_vals) { - oprot.writeString(_iter816); + oprot.writeString(_iter824); } } } @@ -70706,13 +70838,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list817.size); - String _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem818 = iprot.readString(); - struct.part_vals.add(_elem818); + _elem826 = iprot.readString(); + struct.part_vals.add(_elem826); } } struct.setPart_valsIsSet(true); @@ -75314,13 +75446,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list820.size); - String _elem821; - for (int _i822 = 0; _i822 < _list820.size; ++_i822) + org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list828.size); + String _elem829; + for 
(int _i830 = 0; _i830 < _list828.size; ++_i830) { - _elem821 = iprot.readString(); - struct.part_vals.add(_elem821); + _elem829 = iprot.readString(); + struct.part_vals.add(_elem829); } iprot.readListEnd(); } @@ -75356,9 +75488,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter823 : struct.part_vals) + for (String _iter831 : struct.part_vals) { - oprot.writeString(_iter823); + oprot.writeString(_iter831); } oprot.writeListEnd(); } @@ -75401,9 +75533,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter824 : struct.part_vals) + for (String _iter832 : struct.part_vals) { - oprot.writeString(_iter824); + oprot.writeString(_iter832); } } } @@ -75423,13 +75555,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list825.size); - String _elem826; - for (int _i827 = 0; _i827 < _list825.size; ++_i827) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list833.size); + String _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem826 = iprot.readString(); - struct.part_vals.add(_elem826); + _elem834 = iprot.readString(); + struct.part_vals.add(_elem834); } } struct.setPart_valsIsSet(true); @@ -76647,15 +76779,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map828 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map828.size); - String _key829; - String _val830; - for (int _i831 = 0; _i831 < _map828.size; ++_i831) + org.apache.thrift.protocol.TMap _map836 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map836.size); + String _key837; + String _val838; + for (int _i839 = 0; _i839 < _map836.size; ++_i839) { - _key829 = iprot.readString(); - _val830 = iprot.readString(); - struct.partitionSpecs.put(_key829, _val830); + _key837 = iprot.readString(); + _val838 = iprot.readString(); + struct.partitionSpecs.put(_key837, _val838); } iprot.readMapEnd(); } @@ -76713,10 +76845,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter832 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter840 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter832.getKey()); - oprot.writeString(_iter832.getValue()); + oprot.writeString(_iter840.getKey()); + oprot.writeString(_iter840.getValue()); } oprot.writeMapEnd(); } @@ -76779,10 +76911,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for 
(Map.Entry _iter833 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter841 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter833.getKey()); - oprot.writeString(_iter833.getValue()); + oprot.writeString(_iter841.getKey()); + oprot.writeString(_iter841.getValue()); } } } @@ -76806,15 +76938,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map834 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map834.size); - String _key835; - String _val836; - for (int _i837 = 0; _i837 < _map834.size; ++_i837) + org.apache.thrift.protocol.TMap _map842 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map842.size); + String _key843; + String _val844; + for (int _i845 = 0; _i845 < _map842.size; ++_i845) { - _key835 = iprot.readString(); - _val836 = iprot.readString(); - struct.partitionSpecs.put(_key835, _val836); + _key843 = iprot.readString(); + _val844 = iprot.readString(); + struct.partitionSpecs.put(_key843, _val844); } } struct.setPartitionSpecsIsSet(true); @@ -78260,15 +78392,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map838 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map838.size); - String _key839; - String _val840; - for (int _i841 = 0; _i841 < _map838.size; ++_i841) + org.apache.thrift.protocol.TMap _map846 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map846.size); + String _key847; + String _val848; + for (int _i849 = 0; _i849 < _map846.size; ++_i849) { - _key839 = iprot.readString(); - _val840 = iprot.readString(); - struct.partitionSpecs.put(_key839, _val840); + _key847 = iprot.readString(); + _val848 = iprot.readString(); + struct.partitionSpecs.put(_key847, _val848); } iprot.readMapEnd(); } @@ -78326,10 +78458,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter842 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter850 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter842.getKey()); - oprot.writeString(_iter842.getValue()); + oprot.writeString(_iter850.getKey()); + oprot.writeString(_iter850.getValue()); } oprot.writeMapEnd(); } @@ -78392,10 +78524,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter843 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter851 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter843.getKey()); - oprot.writeString(_iter843.getValue()); + oprot.writeString(_iter851.getKey()); + oprot.writeString(_iter851.getValue()); } } } @@ -78419,15 +78551,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TMap _map844 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map844.size); - String _key845; - String _val846; - for (int _i847 = 0; _i847 < _map844.size; ++_i847) + org.apache.thrift.protocol.TMap _map852 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map852.size); + String _key853; + String _val854; + for (int _i855 = 0; _i855 < _map852.size; ++_i855) { - _key845 = iprot.readString(); - _val846 = iprot.readString(); - struct.partitionSpecs.put(_key845, _val846); + _key853 = iprot.readString(); + _val854 = iprot.readString(); + struct.partitionSpecs.put(_key853, _val854); } } struct.setPartitionSpecsIsSet(true); @@ -79092,14 +79224,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list848 = iprot.readListBegin(); - struct.success = new ArrayList(_list848.size); - Partition _elem849; - for (int _i850 = 0; _i850 < _list848.size; ++_i850) + org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); + struct.success = new ArrayList(_list856.size); + Partition _elem857; + for (int _i858 = 0; _i858 < _list856.size; ++_i858) { - _elem849 = new Partition(); - _elem849.read(iprot); - struct.success.add(_elem849); + _elem857 = new Partition(); + _elem857.read(iprot); + struct.success.add(_elem857); } iprot.readListEnd(); } @@ -79161,9 +79293,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter851 : struct.success) + for (Partition _iter859 : struct.success) { - _iter851.write(oprot); + _iter859.write(oprot); } oprot.writeListEnd(); } @@ -79226,9 +79358,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter852 : struct.success) + for (Partition _iter860 : struct.success) { - _iter852.write(oprot); + _iter860.write(oprot); } } } @@ -79252,14 +79384,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list853.size); - Partition _elem854; - for (int _i855 = 0; _i855 < _list853.size; ++_i855) + org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list861.size); + Partition _elem862; + for (int _i863 = 0; _i863 < _list861.size; ++_i863) { - _elem854 = new Partition(); - _elem854.read(iprot); - struct.success.add(_elem854); + _elem862 = new Partition(); + _elem862.read(iprot); + struct.success.add(_elem862); } } struct.setSuccessIsSet(true); @@ -79958,13 +80090,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list856.size); - String _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list864.size); + String _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem857 = iprot.readString(); - struct.part_vals.add(_elem857); + _elem865 = iprot.readString(); + struct.part_vals.add(_elem865); } iprot.readListEnd(); } @@ -79984,13 +80116,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list859 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list859.size); - String _elem860; - for (int _i861 = 0; _i861 < _list859.size; ++_i861) + org.apache.thrift.protocol.TList _list867 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list867.size); + String _elem868; + for (int _i869 = 0; _i869 < _list867.size; ++_i869) { - _elem860 = iprot.readString(); - struct.group_names.add(_elem860); + _elem868 = iprot.readString(); + struct.group_names.add(_elem868); } iprot.readListEnd(); } @@ -80026,9 +80158,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter862 : struct.part_vals) + for (String _iter870 : struct.part_vals) { - oprot.writeString(_iter862); + oprot.writeString(_iter870); } oprot.writeListEnd(); } @@ -80043,9 +80175,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter863 : struct.group_names) + for (String _iter871 : struct.group_names) { - oprot.writeString(_iter863); + oprot.writeString(_iter871); } oprot.writeListEnd(); } @@ -80094,9 +80226,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter864 : struct.part_vals) + for (String _iter872 : struct.part_vals) { - oprot.writeString(_iter864); + oprot.writeString(_iter872); } } } @@ -80106,9 +80238,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter865 : struct.group_names) + for (String _iter873 : struct.group_names) { - oprot.writeString(_iter865); + oprot.writeString(_iter873); } } } @@ -80128,13 +80260,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list866 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list866.size); - String _elem867; - for (int _i868 = 0; _i868 < _list866.size; ++_i868) + org.apache.thrift.protocol.TList _list874 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list874.size); + String _elem875; + for 
(int _i876 = 0; _i876 < _list874.size; ++_i876) { - _elem867 = iprot.readString(); - struct.part_vals.add(_elem867); + _elem875 = iprot.readString(); + struct.part_vals.add(_elem875); } } struct.setPart_valsIsSet(true); @@ -80145,13 +80277,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list869.size); - String _elem870; - for (int _i871 = 0; _i871 < _list869.size; ++_i871) + org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list877.size); + String _elem878; + for (int _i879 = 0; _i879 < _list877.size; ++_i879) { - _elem870 = iprot.readString(); - struct.group_names.add(_elem870); + _elem878 = iprot.readString(); + struct.group_names.add(_elem878); } } struct.setGroup_namesIsSet(true); @@ -82920,14 +83052,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); - struct.success = new ArrayList(_list872.size); - Partition _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); + struct.success = new ArrayList(_list880.size); + Partition _elem881; + for (int _i882 = 0; _i882 < _list880.size; ++_i882) { - _elem873 = new Partition(); - _elem873.read(iprot); - struct.success.add(_elem873); + _elem881 = new Partition(); + _elem881.read(iprot); + struct.success.add(_elem881); } iprot.readListEnd(); } @@ -82971,9 +83103,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter875 : struct.success) + for (Partition _iter883 : struct.success) { - _iter875.write(oprot); + _iter883.write(oprot); } oprot.writeListEnd(); } @@ -83020,9 +83152,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter876 : struct.success) + for (Partition _iter884 : struct.success) { - _iter876.write(oprot); + _iter884.write(oprot); } } } @@ -83040,14 +83172,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list877.size); - Partition _elem878; - for (int _i879 = 0; _i879 < _list877.size; ++_i879) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list885.size); + Partition _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem878 = new Partition(); - _elem878.read(iprot); - struct.success.add(_elem878); + _elem886 = new Partition(); + _elem886.read(iprot); + struct.success.add(_elem886); } } struct.setSuccessIsSet(true); @@ -83737,13 +83869,13 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list880.size); - String _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list888.size); + String _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = iprot.readString(); - struct.group_names.add(_elem881); + _elem889 = iprot.readString(); + struct.group_names.add(_elem889); } iprot.readListEnd(); } @@ -83787,9 +83919,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter883 : struct.group_names) + for (String _iter891 : struct.group_names) { - oprot.writeString(_iter883); + oprot.writeString(_iter891); } oprot.writeListEnd(); } @@ -83844,9 +83976,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter884 : struct.group_names) + for (String _iter892 : struct.group_names) { - oprot.writeString(_iter884); + oprot.writeString(_iter892); } } } @@ -83874,13 +84006,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list885.size); - String _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list893.size); + String _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = iprot.readString(); - struct.group_names.add(_elem886); + _elem894 = iprot.readString(); + struct.group_names.add(_elem894); } } struct.setGroup_namesIsSet(true); @@ -84367,14 +84499,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.success = new ArrayList(_list888.size); - Partition _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.success = new ArrayList(_list896.size); + Partition _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = new Partition(); - _elem889.read(iprot); - struct.success.add(_elem889); + _elem897 = new Partition(); + _elem897.read(iprot); + struct.success.add(_elem897); } iprot.readListEnd(); } @@ -84418,9 +84550,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter891 : struct.success) + for (Partition _iter899 : struct.success) { - _iter891.write(oprot); + 
_iter899.write(oprot); } oprot.writeListEnd(); } @@ -84467,9 +84599,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter892 : struct.success) + for (Partition _iter900 : struct.success) { - _iter892.write(oprot); + _iter900.write(oprot); } } } @@ -84487,14 +84619,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list893.size); - Partition _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list901.size); + Partition _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = new Partition(); - _elem894.read(iprot); - struct.success.add(_elem894); + _elem902 = new Partition(); + _elem902.read(iprot); + struct.success.add(_elem902); } } struct.setSuccessIsSet(true); @@ -85557,14 +85689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); - struct.success = new ArrayList(_list896.size); - PartitionSpec _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.success = new ArrayList(_list904.size); + PartitionSpec _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem897 = new PartitionSpec(); - _elem897.read(iprot); - struct.success.add(_elem897); + _elem905 = new PartitionSpec(); + _elem905.read(iprot); + struct.success.add(_elem905); } iprot.readListEnd(); } @@ -85608,9 +85740,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter899 : struct.success) + for (PartitionSpec _iter907 : struct.success) { - _iter899.write(oprot); + _iter907.write(oprot); } oprot.writeListEnd(); } @@ -85657,9 +85789,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter900 : struct.success) + for (PartitionSpec _iter908 : struct.success) { - _iter900.write(oprot); + _iter908.write(oprot); } } } @@ -85677,14 +85809,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list901.size); - PartitionSpec _elem902; - for (int _i903 = 0; _i903 < _list901.size; ++_i903) + org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list909.size); + PartitionSpec _elem910; + for (int _i911 = 0; _i911 < 
_list909.size; ++_i911) { - _elem902 = new PartitionSpec(); - _elem902.read(iprot); - struct.success.add(_elem902); + _elem910 = new PartitionSpec(); + _elem910.read(iprot); + struct.success.add(_elem910); } } struct.setSuccessIsSet(true); @@ -86663,13 +86795,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.success = new ArrayList(_list904.size); - String _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); + struct.success = new ArrayList(_list912.size); + String _elem913; + for (int _i914 = 0; _i914 < _list912.size; ++_i914) { - _elem905 = iprot.readString(); - struct.success.add(_elem905); + _elem913 = iprot.readString(); + struct.success.add(_elem913); } iprot.readListEnd(); } @@ -86704,9 +86836,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter907 : struct.success) + for (String _iter915 : struct.success) { - oprot.writeString(_iter907); + oprot.writeString(_iter915); } oprot.writeListEnd(); } @@ -86745,9 +86877,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter908 : struct.success) + for (String _iter916 : struct.success) { - oprot.writeString(_iter908); + oprot.writeString(_iter916); } } } @@ -86762,13 +86894,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list909.size); - String _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list917.size); + String _elem918; + for (int _i919 = 0; _i919 < _list917.size; ++_i919) { - _elem910 = iprot.readString(); - struct.success.add(_elem910); + _elem918 = iprot.readString(); + struct.success.add(_elem918); } } struct.setSuccessIsSet(true); @@ -87356,13 +87488,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list912.size); - String _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list920.size); + String _elem921; + for (int _i922 = 0; _i922 < _list920.size; ++_i922) { - _elem913 = iprot.readString(); - struct.part_vals.add(_elem913); + _elem921 = iprot.readString(); + struct.part_vals.add(_elem921); } iprot.readListEnd(); } @@ -87406,9 +87538,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (String _iter915 : struct.part_vals)
+         for (String _iter923 : struct.part_vals)
          {
-           oprot.writeString(_iter915);
+           oprot.writeString(_iter923);
          }
          oprot.writeListEnd();
        }
@@ -87457,9 +87589,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (String _iter916 : struct.part_vals)
+         for (String _iter924 : struct.part_vals)
          {
-           oprot.writeString(_iter916);
+           oprot.writeString(_iter924);
          }
        }
      }
@@ -87482,13 +87614,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new ArrayList<String>(_list917.size);
-         String _elem918;
-         for (int _i919 = 0; _i919 < _list917.size; ++_i919)
+         org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new ArrayList<String>(_list925.size);
+         String _elem926;
+         for (int _i927 = 0; _i927 < _list925.size; ++_i927)
          {
-           _elem918 = iprot.readString();
-           struct.part_vals.add(_elem918);
+           _elem926 = iprot.readString();
+           struct.part_vals.add(_elem926);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -87979,14 +88111,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list920 = iprot.readListBegin();
-                 struct.success = new ArrayList<Partition>(_list920.size);
-                 Partition _elem921;
-                 for (int _i922 = 0; _i922 < _list920.size; ++_i922)
+                 org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
+                 struct.success = new ArrayList<Partition>(_list928.size);
+                 Partition _elem929;
+                 for (int _i930 = 0; _i930 < _list928.size; ++_i930)
                  {
-                   _elem921 = new Partition();
-                   _elem921.read(iprot);
-                   struct.success.add(_elem921);
+                   _elem929 = new Partition();
+                   _elem929.read(iprot);
+                   struct.success.add(_elem929);
                  }
                  iprot.readListEnd();
                }
@@ -88030,9 +88162,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Partition _iter923 : struct.success)
+         for (Partition _iter931 : struct.success)
          {
-           _iter923.write(oprot);
+           _iter931.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -88079,9 +88211,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Partition _iter924 : struct.success)
+         for (Partition _iter932 : struct.success)
          {
-           _iter924.write(oprot);
+           _iter932.write(oprot);
          }
        }
      }
@@ -88099,14 +88231,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Partition>(_list925.size);
-         Partition _elem926;
-         for (int _i927 = 0; _i927 < _list925.size; ++_i927)
+         org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Partition>(_list933.size);
+         Partition _elem934;
+         for (int _i935 = 0; _i935 < _list933.size; ++_i935)
          {
-           _elem926 = new Partition();
-           _elem926.read(iprot);
-           struct.success.add(_elem926);
+           _elem934 = new Partition();
+           _elem934.read(iprot);
+           struct.success.add(_elem934);
          }
        }
        struct.setSuccessIsSet(true);
@@ -88878,13 +89010,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
            case 3: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list928 = iprot.readListBegin();
-                 struct.part_vals = new ArrayList<String>(_list928.size);
-                 String _elem929;
-                 for (int _i930 = 0; _i930 < _list928.size; ++_i930)
+                 org.apache.thrift.protocol.TList _list936 = iprot.readListBegin();
+                 struct.part_vals = new ArrayList<String>(_list936.size);
+                 String _elem937;
+                 for (int _i938 = 0; _i938 < _list936.size; ++_i938)
                  {
-                   _elem929 = iprot.readString();
-                   struct.part_vals.add(_elem929);
+                   _elem937 = iprot.readString();
+                   struct.part_vals.add(_elem937);
                  }
                  iprot.readListEnd();
                }
@@ -88912,13 +89044,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
            case 6: // GROUP_NAMES
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list931 = iprot.readListBegin();
-                 struct.group_names = new ArrayList<String>(_list931.size);
-                 String _elem932;
-                 for (int _i933 = 0; _i933 < _list931.size; ++_i933)
+                 org.apache.thrift.protocol.TList _list939 = iprot.readListBegin();
+                 struct.group_names = new ArrayList<String>(_list939.size);
+                 String _elem940;
+                 for (int _i941 = 0; _i941 < _list939.size; ++_i941)
                  {
-                   _elem932 = iprot.readString();
-                   struct.group_names.add(_elem932);
+                   _elem940 = iprot.readString();
+                   struct.group_names.add(_elem940);
                  }
                  iprot.readListEnd();
                }
@@ -88954,9 +89086,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (String _iter934 : struct.part_vals)
+         for (String _iter942 : struct.part_vals)
          {
-           oprot.writeString(_iter934);
+           oprot.writeString(_iter942);
          }
          oprot.writeListEnd();
        }
@@ -88974,9 +89106,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
        oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-         for (String _iter935 : struct.group_names)
+         for (String _iter943 : struct.group_names)
          {
-           oprot.writeString(_iter935);
+           oprot.writeString(_iter943);
          }
          oprot.writeListEnd();
        }
@@ -89028,9 +89160,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (String _iter936 : struct.part_vals)
+         for (String _iter944 : struct.part_vals)
          {
-           oprot.writeString(_iter936);
+           oprot.writeString(_iter944);
          }
        }
      }
@@ -89043,9 +89175,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
      if (struct.isSetGroup_names()) {
        {
          oprot.writeI32(struct.group_names.size());
-         for (String _iter937 : struct.group_names)
+         for (String _iter945 : struct.group_names)
          {
-           oprot.writeString(_iter937);
+           oprot.writeString(_iter945);
          }
        }
      }
@@ -89065,13 +89197,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list938 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new ArrayList<String>(_list938.size);
-         String _elem939;
-         for (int _i940 = 0; _i940 < _list938.size; ++_i940)
+         org.apache.thrift.protocol.TList _list946 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new ArrayList<String>(_list946.size);
+         String _elem947;
+         for (int _i948 = 0; _i948 < _list946.size; ++_i948)
          {
-           _elem939 = iprot.readString();
-           struct.part_vals.add(_elem939);
+           _elem947 = iprot.readString();
+           struct.part_vals.add(_elem947);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -89086,13 +89218,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
      }
      if (incoming.get(5)) {
        {
-         org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.group_names = new ArrayList<String>(_list941.size);
-         String _elem942;
-         for (int _i943 = 0; _i943 < _list941.size; ++_i943)
+         org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.group_names = new ArrayList<String>(_list949.size);
+         String _elem950;
+         for (int _i951 = 0; _i951 < _list949.size; ++_i951)
          {
-           _elem942 = iprot.readString();
-           struct.group_names.add(_elem942);
+           _elem950 = iprot.readString();
+           struct.group_names.add(_elem950);
          }
        }
        struct.setGroup_namesIsSet(true);
@@ -89579,14 +89711,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
-                 struct.success = new ArrayList<Partition>(_list944.size);
-                 Partition _elem945;
-                 for (int _i946 = 0; _i946 < _list944.size; ++_i946)
+                 org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
+                 struct.success = new ArrayList<Partition>(_list952.size);
+                 Partition _elem953;
+                 for (int _i954 = 0; _i954 < _list952.size; ++_i954)
                  {
-                   _elem945 = new Partition();
-                   _elem945.read(iprot);
-                   struct.success.add(_elem945);
+                   _elem953 = new Partition();
+                   _elem953.read(iprot);
+                   struct.success.add(_elem953);
                  }
                  iprot.readListEnd();
                }
@@ -89630,9 +89762,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Partition _iter947 : struct.success)
+         for (Partition _iter955 : struct.success)
          {
-           _iter947.write(oprot);
+           _iter955.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -89679,9 +89811,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Partition _iter948 : struct.success)
+         for (Partition _iter956 : struct.success)
          {
-           _iter948.write(oprot);
+           _iter956.write(oprot);
          }
        }
      }
@@ -89699,14 +89831,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Partition>(_list949.size);
-         Partition _elem950;
-         for (int _i951 = 0; _i951 < _list949.size; ++_i951)
+         org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Partition>(_list957.size);
+         Partition _elem958;
+         for (int _i959 = 0; _i959 < _list957.size; ++_i959)
          {
-           _elem950 = new Partition();
-           _elem950.read(iprot);
-           struct.success.add(_elem950);
+           _elem958 = new Partition();
+           _elem958.read(iprot);
+           struct.success.add(_elem958);
          }
        }
        struct.setSuccessIsSet(true);
@@ -90299,13 +90431,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
            case 3: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
-                 struct.part_vals = new ArrayList<String>(_list952.size);
-                 String _elem953;
-                 for (int _i954 = 0; _i954 < _list952.size; ++_i954)
+                 org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
+                 struct.part_vals = new ArrayList<String>(_list960.size);
+                 String _elem961;
+                 for (int _i962 = 0; _i962 < _list960.size; ++_i962)
                  {
-                   _elem953 = iprot.readString();
-                   struct.part_vals.add(_elem953);
+                   _elem961 = iprot.readString();
+                   struct.part_vals.add(_elem961);
                  }
                  iprot.readListEnd();
                }
@@ -90349,9 +90481,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (String _iter955 : struct.part_vals)
+         for (String _iter963 : struct.part_vals)
          {
-           oprot.writeString(_iter955);
+           oprot.writeString(_iter963);
          }
          oprot.writeListEnd();
        }
@@ -90400,9 +90532,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (String _iter956 : struct.part_vals)
+         for (String _iter964 : struct.part_vals)
          {
-           oprot.writeString(_iter956);
+           oprot.writeString(_iter964);
          }
        }
      }
@@ -90425,13 +90557,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new ArrayList<String>(_list957.size);
-         String _elem958;
-         for (int _i959 = 0; _i959 < _list957.size; ++_i959)
+         org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new ArrayList<String>(_list965.size);
+         String _elem966;
+         for (int _i967 = 0; _i967 < _list965.size; ++_i967)
          {
-           _elem958 = iprot.readString();
-           struct.part_vals.add(_elem958);
+           _elem966 = iprot.readString();
+           struct.part_vals.add(_elem966);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -90919,13 +91051,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list960 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list960.size);
-                 String _elem961;
-                 for (int _i962 = 0; _i962 < _list960.size; ++_i962)
+                 org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list968.size);
+                 String _elem969;
+                 for (int _i970 = 0; _i970 < _list968.size; ++_i970)
                  {
-                   _elem961 = iprot.readString();
-                   struct.success.add(_elem961);
+                   _elem969 = iprot.readString();
+                   struct.success.add(_elem969);
                  }
                  iprot.readListEnd();
                }
@@ -90969,9 +91101,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter963 : struct.success)
+         for (String _iter971 : struct.success)
          {
-           oprot.writeString(_iter963);
+           oprot.writeString(_iter971);
          }
          oprot.writeListEnd();
        }
@@ -91018,9 +91150,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter964 : struct.success)
+         for (String _iter972 : struct.success)
          {
-           oprot.writeString(_iter964);
+           oprot.writeString(_iter972);
          }
        }
      }
@@ -91038,13 +91170,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list965.size);
-         String _elem966;
-         for (int _i967 = 0; _i967 < _list965.size; ++_i967)
+         org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list973.size);
+         String _elem974;
+         for (int _i975 = 0; _i975 < _list973.size; ++_i975)
          {
-           _elem966 = iprot.readString();
-           struct.success.add(_elem966);
+           _elem974 = iprot.readString();
+           struct.success.add(_elem974);
          }
        }
        struct.setSuccessIsSet(true);
@@ -92211,14 +92343,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list968 = iprot.readListBegin();
-                 struct.success = new ArrayList<Partition>(_list968.size);
-                 Partition _elem969;
-                 for (int _i970 = 0; _i970 < _list968.size; ++_i970)
+                 org.apache.thrift.protocol.TList _list976 = iprot.readListBegin();
+                 struct.success = new ArrayList<Partition>(_list976.size);
+                 Partition _elem977;
+                 for (int _i978 = 0; _i978 < _list976.size; ++_i978)
                  {
-                   _elem969 = new Partition();
-                   _elem969.read(iprot);
-                   struct.success.add(_elem969);
+                   _elem977 = new Partition();
+                   _elem977.read(iprot);
+                   struct.success.add(_elem977);
                  }
                  iprot.readListEnd();
                }
@@ -92262,9 +92394,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Partition _iter971 : struct.success)
+         for (Partition _iter979 : struct.success)
          {
-           _iter971.write(oprot);
+           _iter979.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -92311,9 +92443,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Partition _iter972 : struct.success)
+         for (Partition _iter980 : struct.success)
          {
-           _iter972.write(oprot);
+           _iter980.write(oprot);
          }
        }
      }
@@ -92331,14 +92463,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Partition>(_list973.size);
-         Partition _elem974;
-         for (int _i975 = 0; _i975 < _list973.size; ++_i975)
+         org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Partition>(_list981.size);
+         Partition _elem982;
+         for (int _i983 = 0; _i983 < _list981.size; ++_i983)
          {
-           _elem974 = new Partition();
-           _elem974.read(iprot);
-           struct.success.add(_elem974);
+           _elem982 = new Partition();
+           _elem982.read(iprot);
+           struct.success.add(_elem982);
          }
        }
        struct.setSuccessIsSet(true);
@@ -93505,14 +93637,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list976 = iprot.readListBegin();
-                 struct.success = new ArrayList<PartitionSpec>(_list976.size);
-                 PartitionSpec _elem977;
-                 for (int _i978 = 0; _i978 < _list976.size; ++_i978)
+                 org.apache.thrift.protocol.TList _list984 = iprot.readListBegin();
+                 struct.success = new ArrayList<PartitionSpec>(_list984.size);
+                 PartitionSpec _elem985;
+                 for (int _i986 = 0; _i986 < _list984.size; ++_i986)
                  {
-                   _elem977 = new PartitionSpec();
-                   _elem977.read(iprot);
-                   struct.success.add(_elem977);
+                   _elem985 = new PartitionSpec();
+                   _elem985.read(iprot);
+                   struct.success.add(_elem985);
                  }
                  iprot.readListEnd();
                }
@@ -93556,9 +93688,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (PartitionSpec _iter979 : struct.success)
+         for (PartitionSpec _iter987 : struct.success)
          {
-           _iter979.write(oprot);
+           _iter987.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -93605,9 +93737,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (PartitionSpec _iter980 : struct.success)
+         for (PartitionSpec _iter988 : struct.success)
          {
-           _iter980.write(oprot);
+           _iter988.write(oprot);
          }
        }
      }
@@ -93625,14 +93757,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<PartitionSpec>(_list981.size);
-         PartitionSpec _elem982;
-         for (int _i983 = 0; _i983 < _list981.size; ++_i983)
+         org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<PartitionSpec>(_list989.size);
+         PartitionSpec _elem990;
+         for (int _i991 = 0; _i991 < _list989.size; ++_i991)
          {
-           _elem982 = new PartitionSpec();
-           _elem982.read(iprot);
-           struct.success.add(_elem982);
+           _elem990 = new PartitionSpec();
+           _elem990.read(iprot);
+           struct.success.add(_elem990);
          }
        }
        struct.setSuccessIsSet(true);
@@ -96216,13 +96348,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n
            case 3: // NAMES
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list984 = iprot.readListBegin();
-                 struct.names = new ArrayList<String>(_list984.size);
-                 String _elem985;
-                 for (int _i986 = 0; _i986 < _list984.size; ++_i986)
+                 org.apache.thrift.protocol.TList _list992 = iprot.readListBegin();
+                 struct.names = new ArrayList<String>(_list992.size);
+                 String _elem993;
+                 for (int _i994 = 0; _i994 < _list992.size; ++_i994)
                  {
-                   _elem985 = iprot.readString();
-                   struct.names.add(_elem985);
+                   _elem993 = iprot.readString();
+                   struct.names.add(_elem993);
                  }
                  iprot.readListEnd();
                }
@@ -96258,9 +96390,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
        oprot.writeFieldBegin(NAMES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size()));
-         for (String _iter987 : struct.names)
+         for (String _iter995 : struct.names)
          {
-           oprot.writeString(_iter987);
+           oprot.writeString(_iter995);
          }
          oprot.writeListEnd();
        }
@@ -96303,9 +96435,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n
      if (struct.isSetNames()) {
        {
          oprot.writeI32(struct.names.size());
-         for (String _iter988 : struct.names)
+         for (String _iter996 : struct.names)
          {
-           oprot.writeString(_iter988);
+           oprot.writeString(_iter996);
          }
        }
      }
@@ -96325,13 +96457,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.names = new ArrayList<String>(_list989.size);
-         String _elem990;
-         for (int _i991 = 0; _i991 < _list989.size; ++_i991)
+         org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.names = new ArrayList<String>(_list997.size);
+         String _elem998;
+         for (int _i999 = 0; _i999 < _list997.size; ++_i999)
          {
-           _elem990 = iprot.readString();
-           struct.names.add(_elem990);
+           _elem998 = iprot.readString();
+           struct.names.add(_elem998);
          }
        }
        struct.setNamesIsSet(true);
@@ -96818,14 +96950,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list992 = iprot.readListBegin();
-                 struct.success = new ArrayList<Partition>(_list992.size);
-                 Partition _elem993;
-                 for (int _i994 = 0; _i994 < _list992.size; ++_i994)
+                 org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin();
+                 struct.success = new ArrayList<Partition>(_list1000.size);
+                 Partition _elem1001;
+                 for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002)
                  {
-                   _elem993 = new Partition();
-                   _elem993.read(iprot);
-                   struct.success.add(_elem993);
+                   _elem1001 = new Partition();
+                   _elem1001.read(iprot);
+                   struct.success.add(_elem1001);
                  }
                  iprot.readListEnd();
                }
@@ -96869,9 +97001,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Partition _iter995 : struct.success)
+         for (Partition _iter1003 : struct.success)
          {
-           _iter995.write(oprot);
+           _iter1003.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -96918,9 +97050,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Partition _iter996 : struct.success)
+         for (Partition _iter1004 : struct.success)
          {
-           _iter996.write(oprot);
+           _iter1004.write(oprot);
          }
        }
      }
@@ -96938,14 +97070,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Partition>(_list997.size);
-         Partition _elem998;
-         for (int _i999 = 0; _i999 < _list997.size; ++_i999)
+         org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Partition>(_list1005.size);
+         Partition _elem1006;
+         for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007)
          {
-           _elem998 = new Partition();
-           _elem998.read(iprot);
-           struct.success.add(_elem998);
+           _elem1006 = new Partition();
+           _elem1006.read(iprot);
+           struct.success.add(_elem1006);
          }
        }
        struct.setSuccessIsSet(true);
@@ -98495,14 +98627,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar
            case 3: // NEW_PARTS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin();
-                 struct.new_parts = new ArrayList<Partition>(_list1000.size);
-                 Partition _elem1001;
-                 for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002)
+                 org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin();
+                 struct.new_parts = new ArrayList<Partition>(_list1008.size);
+                 Partition _elem1009;
+                 for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010)
                  {
-                   _elem1001 = new Partition();
-                   _elem1001.read(iprot);
-                   struct.new_parts.add(_elem1001);
+                   _elem1009 = new Partition();
+                   _elem1009.read(iprot);
+                   struct.new_parts.add(_elem1009);
                  }
                  iprot.readListEnd();
                }
@@ -98538,9 +98670,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a
        oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-         for (Partition _iter1003 : struct.new_parts)
+         for (Partition _iter1011 : struct.new_parts)
          {
-           _iter1003.write(oprot);
+           _iter1011.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -98583,9 +98715,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar
      if (struct.isSetNew_parts()) {
        {
          oprot.writeI32(struct.new_parts.size());
-         for (Partition _iter1004 : struct.new_parts)
+         for (Partition _iter1012 : struct.new_parts)
          {
-           _iter1004.write(oprot);
+           _iter1012.write(oprot);
          }
        }
      }
@@ -98605,14 +98737,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.new_parts = new ArrayList<Partition>(_list1005.size);
-         Partition _elem1006;
-         for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007)
+         org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.new_parts = new ArrayList<Partition>(_list1013.size);
+         Partition _elem1014;
+         for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015)
          {
-           _elem1006 = new Partition();
-           _elem1006.read(iprot);
-           struct.new_parts.add(_elem1006);
+           _elem1014 = new Partition();
+           _elem1014.read(iprot);
+           struct.new_parts.add(_elem1014);
          }
        }
        struct.setNew_partsIsSet(true);
@@ -99665,14 +99797,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi
            case 3: // NEW_PARTS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin();
-                 struct.new_parts = new ArrayList<Partition>(_list1008.size);
-                 Partition _elem1009;
-                 for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010)
+                 org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin();
+                 struct.new_parts = new ArrayList<Partition>(_list1016.size);
+                 Partition _elem1017;
+                 for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018)
                  {
-                   _elem1009 = new Partition();
-                   _elem1009.read(iprot);
-                   struct.new_parts.add(_elem1009);
+                   _elem1017 = new Partition();
+                   _elem1017.read(iprot);
+                   struct.new_parts.add(_elem1017);
                  }
                  iprot.readListEnd();
                }
@@ -99717,9 +99849,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w
        oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size()));
-         for (Partition _iter1011 : struct.new_parts)
+         for (Partition _iter1019 : struct.new_parts)
          {
-           _iter1011.write(oprot);
+           _iter1019.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -99770,9 +99902,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi
      if (struct.isSetNew_parts()) {
        {
          oprot.writeI32(struct.new_parts.size());
-         for (Partition _iter1012 : struct.new_parts)
+         for (Partition _iter1020 : struct.new_parts)
          {
-           _iter1012.write(oprot);
+           _iter1020.write(oprot);
          }
        }
      }
@@ -99795,14 +99927,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.new_parts = new ArrayList<Partition>(_list1013.size);
-         Partition _elem1014;
-         for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015)
+         org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.new_parts = new ArrayList<Partition>(_list1021.size);
+         Partition _elem1022;
+         for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
          {
-           _elem1014 = new Partition();
-           _elem1014.read(iprot);
-           struct.new_parts.add(_elem1014);
+           _elem1022 = new Partition();
+           _elem1022.read(iprot);
+           struct.new_parts.add(_elem1022);
          }
        }
        struct.setNew_partsIsSet(true);
@@ -102003,13 +102135,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar
            case 3: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin();
-                 struct.part_vals = new ArrayList<String>(_list1016.size);
-                 String _elem1017;
-                 for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018)
+                 org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
+                 struct.part_vals = new ArrayList<String>(_list1024.size);
+                 String _elem1025;
+                 for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
                  {
-                   _elem1017 = iprot.readString();
-                   struct.part_vals.add(_elem1017);
+                   _elem1025 = iprot.readString();
+                   struct.part_vals.add(_elem1025);
                  }
                  iprot.readListEnd();
                }
@@ -102054,9 +102186,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (String _iter1019 : struct.part_vals)
+         for (String _iter1027 : struct.part_vals)
          {
-           oprot.writeString(_iter1019);
+           oprot.writeString(_iter1027);
          }
          oprot.writeListEnd();
        }
@@ -102107,9 +102239,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (String _iter1020 : struct.part_vals)
+         for (String _iter1028 : struct.part_vals)
          {
-           oprot.writeString(_iter1020);
+           oprot.writeString(_iter1028);
          }
        }
      }
@@ -102132,13 +102264,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new ArrayList<String>(_list1021.size);
-         String _elem1022;
-         for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023)
+         org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new ArrayList<String>(_list1029.size);
+         String _elem1030;
+         for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
          {
-           _elem1022 = iprot.readString();
-           struct.part_vals.add(_elem1022);
+           _elem1030 = iprot.readString();
+           struct.part_vals.add(_elem1030);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -103012,13 +103144,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_
            case 1: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin();
-                 struct.part_vals = new ArrayList<String>(_list1024.size);
-                 String _elem1025;
-                 for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026)
+                 org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
+                 struct.part_vals = new ArrayList<String>(_list1032.size);
+                 String _elem1033;
+                 for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
                  {
-                   _elem1025 = iprot.readString();
-                   struct.part_vals.add(_elem1025);
+                   _elem1033 = iprot.readString();
+                   struct.part_vals.add(_elem1033);
                  }
                  iprot.readListEnd();
                }
@@ -103052,9 +103184,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (String _iter1027 : struct.part_vals)
+         for (String _iter1035 : struct.part_vals)
          {
-           oprot.writeString(_iter1027);
+           oprot.writeString(_iter1035);
          }
          oprot.writeListEnd();
        }
@@ -103091,9 +103223,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (String _iter1028 : struct.part_vals)
+         for (String _iter1036 : struct.part_vals)
          {
-           oprot.writeString(_iter1028);
+           oprot.writeString(_iter1036);
          }
        }
      }
@@ -103108,13 +103240,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new ArrayList<String>(_list1029.size);
-         String _elem1030;
-         for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031)
+         org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new ArrayList<String>(_list1037.size);
+         String _elem1038;
+         for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039)
          {
-           _elem1030 = iprot.readString();
-           struct.part_vals.add(_elem1030);
+           _elem1038 = iprot.readString();
+           struct.part_vals.add(_elem1038);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -105269,13 +105401,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1032.size);
-                 String _elem1033;
-                 for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034)
+                 org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1040.size);
+                 String _elem1041;
+                 for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042)
                  {
-                   _elem1033 = iprot.readString();
-                   struct.success.add(_elem1033);
+                   _elem1041 = iprot.readString();
+                   struct.success.add(_elem1041);
                  }
                  iprot.readListEnd();
                }
@@ -105310,9 +105442,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1035 : struct.success)
+         for (String _iter1043 : struct.success)
          {
-           oprot.writeString(_iter1035);
+           oprot.writeString(_iter1043);
          }
          oprot.writeListEnd();
        }
@@ -105351,9 +105483,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1036 : struct.success)
+         for (String _iter1044 : struct.success)
          {
-           oprot.writeString(_iter1036);
+           oprot.writeString(_iter1044);
          }
        }
      }
@@ -105368,13 +105500,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list1037.size);
-         String _elem1038;
-         for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039)
+         org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list1045.size);
+         String _elem1046;
+         for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047)
          {
-           _elem1038 = iprot.readString();
-           struct.success.add(_elem1038);
+           _elem1046 = iprot.readString();
+           struct.success.add(_elem1046);
          }
        }
        struct.setSuccessIsSet(true);
@@ -106137,15 +106269,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                {
-                 org.apache.thrift.protocol.TMap _map1040 = iprot.readMapBegin();
-                 struct.success = new HashMap<String,String>(2*_map1040.size);
-                 String _key1041;
-                 String _val1042;
-                 for (int _i1043 = 0; _i1043 < _map1040.size; ++_i1043)
+                 org.apache.thrift.protocol.TMap _map1048 = iprot.readMapBegin();
+                 struct.success = new HashMap<String,String>(2*_map1048.size);
+                 String _key1049;
+                 String _val1050;
+                 for (int _i1051 = 0; _i1051 < _map1048.size; ++_i1051)
                  {
-                   _key1041 = iprot.readString();
-                   _val1042 = iprot.readString();
-                   struct.success.put(_key1041, _val1042);
+                   _key1049 = iprot.readString();
+                   _val1050 = iprot.readString();
+                   struct.success.put(_key1049, _val1050);
                  }
                  iprot.readMapEnd();
                }
@@ -106180,10 +106312,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (Map.Entry<String, String> _iter1044 : struct.success.entrySet())
+         for (Map.Entry<String, String> _iter1052 : struct.success.entrySet())
          {
-           oprot.writeString(_iter1044.getKey());
-           oprot.writeString(_iter1044.getValue());
+           oprot.writeString(_iter1052.getKey());
+           oprot.writeString(_iter1052.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -106222,10 +106354,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Map.Entry<String, String> _iter1045 : struct.success.entrySet())
+         for (Map.Entry<String, String> _iter1053 : struct.success.entrySet())
          {
-           oprot.writeString(_iter1045.getKey());
-           oprot.writeString(_iter1045.getValue());
+           oprot.writeString(_iter1053.getKey());
+           oprot.writeString(_iter1053.getValue());
          }
        }
      }
@@ -106240,15 +106372,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TMap _map1046 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new HashMap<String,String>(2*_map1046.size);
-         String _key1047;
-         String _val1048;
-         for (int _i1049 = 0; _i1049 < _map1046.size; ++_i1049)
+         org.apache.thrift.protocol.TMap _map1054 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new HashMap<String,String>(2*_map1054.size);
+         String _key1055;
+         String _val1056;
+         for (int _i1057 = 0; _i1057 < _map1054.size; ++_i1057)
          {
-           _key1047 = iprot.readString();
-           _val1048 = iprot.readString();
-           struct.success.put(_key1047, _val1048);
+           _key1055 = iprot.readString();
+           _val1056 = iprot.readString();
+           struct.success.put(_key1055, _val1056);
          }
        }
        struct.setSuccessIsSet(true);
@@ -106843,15 +106975,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve
            case 3: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                {
-                 org.apache.thrift.protocol.TMap _map1050 = iprot.readMapBegin();
-                 struct.part_vals = new HashMap<String,String>(2*_map1050.size);
-                 String _key1051;
-                 String _val1052;
-                 for (int _i1053 = 0; _i1053 < _map1050.size; ++_i1053)
+                 org.apache.thrift.protocol.TMap _map1058 = iprot.readMapBegin();
+                 struct.part_vals = new HashMap<String,String>(2*_map1058.size);
+                 String _key1059;
+                 String _val1060;
+                 for (int _i1061 = 0; _i1061 < _map1058.size; ++_i1061)
                  {
-                   _key1051 = iprot.readString();
-                   _val1052 = iprot.readString();
-                   struct.part_vals.put(_key1051, _val1052);
+                   _key1059 = iprot.readString();
+                   _val1060 = iprot.readString();
+                   struct.part_vals.put(_key1059, _val1060);
                  }
                  iprot.readMapEnd();
                }
@@ -106895,10 +107027,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (Map.Entry<String, String> _iter1054 : struct.part_vals.entrySet())
+         for (Map.Entry<String, String> _iter1062 : struct.part_vals.entrySet())
          {
-           oprot.writeString(_iter1054.getKey());
-           oprot.writeString(_iter1054.getValue());
+           oprot.writeString(_iter1062.getKey());
+           oprot.writeString(_iter1062.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -106949,10 +107081,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (Map.Entry<String, String> _iter1055 : struct.part_vals.entrySet())
+         for (Map.Entry<String, String> _iter1063 : struct.part_vals.entrySet())
          {
-           oprot.writeString(_iter1055.getKey());
-           oprot.writeString(_iter1055.getValue());
+           oprot.writeString(_iter1063.getKey());
+           oprot.writeString(_iter1063.getValue());
          }
        }
      }
@@ -106975,15 +107107,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TMap _map1056 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new HashMap<String,String>(2*_map1056.size);
-         String _key1057;
-         String _val1058;
-         for (int _i1059 = 0; _i1059 < _map1056.size; ++_i1059)
+         org.apache.thrift.protocol.TMap _map1064 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new HashMap<String,String>(2*_map1064.size);
+         String _key1065;
+         String _val1066;
+         for (int _i1067 = 0; _i1067 < _map1064.size; ++_i1067)
          {
-           _key1057 = iprot.readString();
-           _val1058 = iprot.readString();
-           struct.part_vals.put(_key1057, _val1058);
+           _key1065 = iprot.readString();
+           _val1066 = iprot.readString();
+           struct.part_vals.put(_key1065, _val1066);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -108467,15 +108599,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo
            case 3: // PART_VALS
              if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
                {
-                 org.apache.thrift.protocol.TMap _map1060 = iprot.readMapBegin();
-                 struct.part_vals = new HashMap<String,String>(2*_map1060.size);
-                 String _key1061;
-                 String _val1062;
-                 for (int _i1063 = 0; _i1063 < _map1060.size; ++_i1063)
+                 org.apache.thrift.protocol.TMap _map1068 = iprot.readMapBegin();
+                 struct.part_vals = new HashMap<String,String>(2*_map1068.size);
+                 String _key1069;
+                 String _val1070;
+                 for (int _i1071 = 0; _i1071 < _map1068.size; ++_i1071)
                  {
-                   _key1061 = iprot.readString();
-                   _val1062 = iprot.readString();
-                   struct.part_vals.put(_key1061, _val1062);
+                   _key1069 = iprot.readString();
+                   _val1070 = iprot.readString();
+                   struct.part_vals.put(_key1069, _val1070);
                  }
                  iprot.readMapEnd();
                }
@@ -108519,10 +108651,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF
        oprot.writeFieldBegin(PART_VALS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size()));
-         for (Map.Entry<String, String> _iter1064 : struct.part_vals.entrySet())
+         for (Map.Entry<String, String> _iter1072 : struct.part_vals.entrySet())
          {
-           oprot.writeString(_iter1064.getKey());
-           oprot.writeString(_iter1064.getValue());
+           oprot.writeString(_iter1072.getKey());
+           oprot.writeString(_iter1072.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -108573,10 +108705,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo
      if (struct.isSetPart_vals()) {
        {
          oprot.writeI32(struct.part_vals.size());
-         for (Map.Entry<String, String> _iter1065 : struct.part_vals.entrySet())
+         for (Map.Entry<String, String> _iter1073 : struct.part_vals.entrySet())
          {
-           oprot.writeString(_iter1065.getKey());
-           oprot.writeString(_iter1065.getValue());
+           oprot.writeString(_iter1073.getKey());
+           oprot.writeString(_iter1073.getValue());
          }
        }
      }
@@ -108599,15 +108731,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TMap _map1066 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.part_vals = new HashMap<String,String>(2*_map1066.size);
-         String _key1067;
-         String _val1068;
-         for (int _i1069 = 0; _i1069 < _map1066.size; ++_i1069)
+         org.apache.thrift.protocol.TMap _map1074 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.part_vals = new HashMap<String,String>(2*_map1074.size);
+         String _key1075;
+         String _val1076;
+         for (int _i1077 = 0; _i1077 < _map1074.size; ++_i1077)
          {
-           _key1067 = iprot.readString();
-           _val1068 = iprot.readString();
-           struct.part_vals.put(_key1067, _val1068);
+           _key1075 = iprot.readString();
+           _val1076 = iprot.readString();
+           struct.part_vals.put(_key1075, _val1076);
          }
        }
        struct.setPart_valsIsSet(true);
@@ -115331,14 +115463,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1070 = iprot.readListBegin();
-                 struct.success = new ArrayList<Index>(_list1070.size);
-                 Index _elem1071;
-                 for (int _i1072 = 0; _i1072 < _list1070.size; ++_i1072)
+                 org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin();
+                 struct.success = new ArrayList<Index>(_list1078.size);
+                 Index _elem1079;
+                 for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080)
                  {
-                   _elem1071 = new Index();
-                   _elem1071.read(iprot);
-                   struct.success.add(_elem1071);
+                   _elem1079 = new Index();
+                   _elem1079.read(iprot);
+                   struct.success.add(_elem1079);
                  }
                  iprot.readListEnd();
                }
@@ -115382,9 +115514,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Index _iter1073 : struct.success)
+         for (Index _iter1081 : struct.success)
          {
-           _iter1073.write(oprot);
+           _iter1081.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -115431,9 +115563,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Index _iter1074 : struct.success)
+         for (Index _iter1082 : struct.success)
          {
-           _iter1074.write(oprot);
+           _iter1082.write(oprot);
          }
        }
      }
@@ -115451,14 +115583,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s
      BitSet incoming = iprot.readBitSet(3);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1075 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Index>(_list1075.size);
-         Index _elem1076;
-         for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077)
+         org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Index>(_list1083.size);
+         Index _elem1084;
+         for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085)
          {
-           _elem1076 = new Index();
-           _elem1076.read(iprot);
-           struct.success.add(_elem1076);
+           _elem1084 = new Index();
+           _elem1084.read(iprot);
+           struct.success.add(_elem1084);
          }
        }
        struct.setSuccessIsSet(true);
@@ -116437,13 +116569,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1078.size);
-                 String _elem1079;
-                 for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080)
+                 org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1086.size);
+                 String _elem1087;
+                 for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088)
                  {
-                   _elem1079 = iprot.readString();
-                   struct.success.add(_elem1079);
+                   _elem1087 = iprot.readString();
+                   struct.success.add(_elem1087);
                  }
                  iprot.readListEnd();
                }
@@ -116478,9 +116610,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1081 : struct.success)
+         for (String _iter1089 : struct.success)
          {
-           oprot.writeString(_iter1081);
+           oprot.writeString(_iter1089);
          }
          oprot.writeListEnd();
        }
@@ -116519,9 +116651,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1082 : struct.success)
+         for (String _iter1090 : struct.success)
          {
-           oprot.writeString(_iter1082);
+           oprot.writeString(_iter1090);
          }
        }
      }
@@ -116536,13 +116668,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list1083.size);
-         String _elem1084;
-         for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085)
+         org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list1091.size);
+         String _elem1092;
+         for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093)
          {
-           _elem1084 = iprot.readString();
-           struct.success.add(_elem1084);
+           _elem1092 = iprot.readString();
+           struct.success.add(_elem1092);
          }
        }
        struct.setSuccessIsSet(true);
@@ -134153,13 +134285,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1086.size);
-                 String _elem1087;
-                 for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088)
+                 org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1094.size);
+                 String _elem1095;
+                 for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096)
                  {
-                   _elem1087 = iprot.readString();
-                   struct.success.add(_elem1087);
+                   _elem1095 = iprot.readString();
+                   struct.success.add(_elem1095);
                  }
                  iprot.readListEnd();
                }
@@ -134194,9 +134326,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1089 : struct.success)
+         for (String _iter1097 : struct.success)
          {
-           oprot.writeString(_iter1089);
+           oprot.writeString(_iter1097);
          }
          oprot.writeListEnd();
        }
@@ -134235,9 +134367,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1090 : struct.success)
+         for (String _iter1098 : struct.success)
          {
-           oprot.writeString(_iter1090);
+           oprot.writeString(_iter1098);
          }
        }
      }
@@ -134252,13 +134384,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list1091.size);
-         String _elem1092;
-         for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093)
+         org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list1099.size);
+         String _elem1100;
+         for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101)
          {
-           _elem1092 = iprot.readString();
-           struct.success.add(_elem1092);
+           _elem1100 = iprot.readString();
+           struct.success.add(_elem1100);
          }
        }
        struct.setSuccessIsSet(true);
@@ -138313,13 +138445,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1094.size);
-                 String _elem1095;
-                 for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096)
+                 org.apache.thrift.protocol.TList _list1102 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1102.size);
+                 String _elem1103;
+                 for (int _i1104 = 0; _i1104 < _list1102.size; ++_i1104)
                  {
-                   _elem1095 = iprot.readString();
-                   struct.success.add(_elem1095);
+                   _elem1103 = iprot.readString();
+                   struct.success.add(_elem1103);
                  }
                  iprot.readListEnd();
                }
@@ -138354,9 +138486,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1097 : struct.success)
+         for (String _iter1105 : struct.success)
          {
-           oprot.writeString(_iter1097);
+           oprot.writeString(_iter1105);
          }
          oprot.writeListEnd();
        }
@@ -138395,9 +138527,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1098 : struct.success)
+         for (String _iter1106 : struct.success)
          {
-           oprot.writeString(_iter1098);
+           oprot.writeString(_iter1106);
          }
        }
      }
@@ -138412,13 +138544,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list1099.size);
-         String _elem1100;
-         for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101)
+         org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list1107.size);
+         String _elem1108;
+         for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109)
          {
-           _elem1100 = iprot.readString();
-           struct.success.add(_elem1100);
+           _elem1108 = iprot.readString();
+           struct.success.add(_elem1108);
          }
        }
        struct.setSuccessIsSet(true);
@@ -141709,14 +141841,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1102 = iprot.readListBegin();
-                 struct.success = new ArrayList<Role>(_list1102.size);
-                 Role _elem1103;
-                 for (int _i1104 = 0; _i1104 < _list1102.size; ++_i1104)
+                 org.apache.thrift.protocol.TList _list1110 = iprot.readListBegin();
+                 struct.success = new ArrayList<Role>(_list1110.size);
+                 Role _elem1111;
+                 for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112)
                  {
-                   _elem1103 = new Role();
-                   _elem1103.read(iprot);
-                   struct.success.add(_elem1103);
+                   _elem1111 = new Role();
+                   _elem1111.read(iprot);
+                   struct.success.add(_elem1111);
                  }
                  iprot.readListEnd();
                }
@@ -141751,9 +141883,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (Role _iter1105 : struct.success)
+         for (Role _iter1113 : struct.success)
          {
-           _iter1105.write(oprot);
+           _iter1113.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -141792,9 +141924,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (Role _iter1106 : struct.success)
+         for (Role _iter1114 : struct.success)
          {
-           _iter1106.write(oprot);
+           _iter1114.write(oprot);
          }
        }
      }
@@ -141809,14 +141941,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<Role>(_list1107.size);
-         Role _elem1108;
-         for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109)
+         org.apache.thrift.protocol.TList _list1115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<Role>(_list1115.size);
+         Role _elem1116;
+         for (int _i1117 = 0; _i1117 < _list1115.size; ++_i1117)
          {
-           _elem1108 = new Role();
-           _elem1108.read(iprot);
-           struct.success.add(_elem1108);
+           _elem1116 = new Role();
+           _elem1116.read(iprot);
+           struct.success.add(_elem1116);
          }
        }
        struct.setSuccessIsSet(true);
@@ -144821,13 +144953,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a
            case 3: // GROUP_NAMES
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1110 = iprot.readListBegin();
-                 struct.group_names = new ArrayList<String>(_list1110.size);
-                 String _elem1111;
-                 for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112)
+                 org.apache.thrift.protocol.TList _list1118 = iprot.readListBegin();
+                 struct.group_names = new ArrayList<String>(_list1118.size);
+                 String _elem1119;
+                 for (int _i1120 = 0; _i1120 < _list1118.size; ++_i1120)
                  {
-                   _elem1111 = iprot.readString();
-                   struct.group_names.add(_elem1111);
+                   _elem1119 = iprot.readString();
+                   struct.group_names.add(_elem1119);
                  }
                  iprot.readListEnd();
                }
@@ -144863,9 +144995,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_
        oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-         for (String _iter1113 : struct.group_names)
+         for (String _iter1121 : struct.group_names)
          {
-           oprot.writeString(_iter1113);
+           oprot.writeString(_iter1121);
          }
          oprot.writeListEnd();
        }
@@ -144908,9 +145040,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a
      if (struct.isSetGroup_names()) {
        {
          oprot.writeI32(struct.group_names.size());
-         for (String _iter1114 : struct.group_names)
+         for (String _iter1122 : struct.group_names)
          {
-           oprot.writeString(_iter1114);
+           oprot.writeString(_iter1122);
         }
        }
      }
@@ -144931,13 +145063,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar
      }
      if (incoming.get(2)) {
        {
-         org.apache.thrift.protocol.TList _list1115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.group_names = new ArrayList<String>(_list1115.size);
-         String _elem1116;
-         for (int _i1117 = 0; _i1117 < _list1115.size; ++_i1117)
+         org.apache.thrift.protocol.TList _list1123 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.group_names = new ArrayList<String>(_list1123.size);
+         String _elem1124;
+         for (int _i1125 = 0; _i1125 < _list1123.size; ++_i1125)
          {
-           _elem1116 = iprot.readString();
-           struct.group_names.add(_elem1116);
+           _elem1124 = iprot.readString();
+           struct.group_names.add(_elem1124);
          }
        }
        struct.setGroup_namesIsSet(true);
@@ -146395,14 +146527,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1118 = iprot.readListBegin();
-                 struct.success = new ArrayList<HiveObjectPrivilege>(_list1118.size);
-                 HiveObjectPrivilege _elem1119;
-                 for (int _i1120 = 0; _i1120 < _list1118.size; ++_i1120)
+                 org.apache.thrift.protocol.TList _list1126 = iprot.readListBegin();
+                 struct.success = new ArrayList<HiveObjectPrivilege>(_list1126.size);
+                 HiveObjectPrivilege _elem1127;
+                 for (int _i1128 = 0; _i1128 < _list1126.size; ++_i1128)
                  {
-                   _elem1119 = new HiveObjectPrivilege();
-                   _elem1119.read(iprot);
-                   struct.success.add(_elem1119);
+                   _elem1127 = new HiveObjectPrivilege();
+                   _elem1127.read(iprot);
+                   struct.success.add(_elem1127);
                  }
                  iprot.readListEnd();
                }
@@ -146437,9 +146569,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-         for (HiveObjectPrivilege _iter1121 : struct.success)
+         for (HiveObjectPrivilege _iter1129 : struct.success)
          {
-           _iter1121.write(oprot);
+           _iter1129.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -146478,9 +146610,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (HiveObjectPrivilege _iter1122 : struct.success)
+         for (HiveObjectPrivilege _iter1130 : struct.success)
          {
-           _iter1122.write(oprot);
+           _iter1130.write(oprot);
          }
        }
      }
@@ -146495,14 +146627,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1123 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-         struct.success = new ArrayList<HiveObjectPrivilege>(_list1123.size);
-         HiveObjectPrivilege _elem1124;
-         for (int _i1125 = 0; _i1125 < _list1123.size; ++_i1125)
+         org.apache.thrift.protocol.TList _list1131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.success = new ArrayList<HiveObjectPrivilege>(_list1131.size);
+         HiveObjectPrivilege _elem1132;
+         for (int _i1133 = 0; _i1133 < _list1131.size; ++_i1133)
          {
-           _elem1124 = new HiveObjectPrivilege();
-           _elem1124.read(iprot);
-           struct.success.add(_elem1124);
+           _elem1132 = new HiveObjectPrivilege();
+           _elem1132.read(iprot);
+           struct.success.add(_elem1132);
          }
        }
        struct.setSuccessIsSet(true);
@@ -149404,13 +149536,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct
            case 2: // GROUP_NAMES
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1126 = iprot.readListBegin();
-                 struct.group_names = new ArrayList<String>(_list1126.size);
-                 String _elem1127;
-                 for (int _i1128 = 0; _i1128 < _list1126.size; ++_i1128)
+                 org.apache.thrift.protocol.TList _list1134 = iprot.readListBegin();
+                 struct.group_names = new ArrayList<String>(_list1134.size);
+                 String _elem1135;
+                 for (int _i1136 = 0; _i1136 < _list1134.size; ++_i1136)
                  {
-                   _elem1127 = iprot.readString();
-                   struct.group_names.add(_elem1127);
+                   _elem1135 = iprot.readString();
+                   struct.group_names.add(_elem1135);
                  }
                  iprot.readListEnd();
                }
@@ -149441,9 +149573,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc
        oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size()));
-         for (String _iter1129 : struct.group_names)
+         for (String _iter1137 : struct.group_names)
          {
-           oprot.writeString(_iter1129);
+           oprot.writeString(_iter1137);
          }
          oprot.writeListEnd();
        }
@@ -149480,9 +149612,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct
      if (struct.isSetGroup_names()) {
        {
          oprot.writeI32(struct.group_names.size());
-         for (String _iter1130 : struct.group_names)
+         for (String _iter1138 : struct.group_names)
          {
-           oprot.writeString(_iter1130);
+           oprot.writeString(_iter1138);
          }
        }
      }
@@ -149498,13 +149630,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct)
      }
      if (incoming.get(1)) {
        {
-         org.apache.thrift.protocol.TList _list1131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.group_names = new ArrayList<String>(_list1131.size);
-         String _elem1132;
-         for (int _i1133 = 0; _i1133 < _list1131.size; ++_i1133)
+         org.apache.thrift.protocol.TList _list1139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.group_names = new ArrayList<String>(_list1139.size);
+         String _elem1140;
+         for (int _i1141 = 0; _i1141 < _list1139.size; ++_i1141)
          {
-           _elem1132 = iprot.readString();
-           struct.group_names.add(_elem1132);
+           _elem1140 = iprot.readString();
+           struct.group_names.add(_elem1140);
          }
        }
        struct.setGroup_namesIsSet(true);
@@ -149907,13 +150039,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1134 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1134.size);
-                 String _elem1135;
-                 for (int _i1136 = 0; _i1136 < _list1134.size; ++_i1136)
+                 org.apache.thrift.protocol.TList _list1142 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1142.size);
+                 String _elem1143;
+                 for (int _i1144 = 0; _i1144 < _list1142.size; ++_i1144)
                  {
-                   _elem1135 = iprot.readString();
-                   struct.success.add(_elem1135);
+                   _elem1143 = iprot.readString();
+                   struct.success.add(_elem1143);
                  }
                  iprot.readListEnd();
                }
@@ -149948,9 +150080,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1137 : struct.success)
+         for (String _iter1145 : struct.success)
          {
-           oprot.writeString(_iter1137);
+           oprot.writeString(_iter1145);
          }
          oprot.writeListEnd();
        }
@@ -149989,9 +150121,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1138 : struct.success)
+         for (String _iter1146 : struct.success)
          {
-           oprot.writeString(_iter1138);
+           oprot.writeString(_iter1146);
          }
        }
      }
@@ -150006,13 +150138,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-         struct.success = new ArrayList<String>(_list1139.size);
-         String _elem1140;
-         for (int _i1141 = 0; _i1141 < _list1139.size; ++_i1141)
+         org.apache.thrift.protocol.TList _list1147 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.success = new ArrayList<String>(_list1147.size);
+         String _elem1148;
+         for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149)
          {
-           _elem1140 = iprot.readString();
-           struct.success.add(_elem1140);
+           _elem1148 = iprot.readString();
+           struct.success.add(_elem1148);
          }
        }
        struct.setSuccessIsSet(true);
@@ -155303,13 +155435,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
                {
-                 org.apache.thrift.protocol.TList _list1142 = iprot.readListBegin();
-                 struct.success = new ArrayList<String>(_list1142.size);
-                 String _elem1143;
-                 for (int _i1144 = 0; _i1144 < _list1142.size; ++_i1144)
+                 org.apache.thrift.protocol.TList _list1150 = iprot.readListBegin();
+                 struct.success = new ArrayList<String>(_list1150.size);
+                 String _elem1151;
+                 for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152)
                  {
-                   _elem1143 = iprot.readString();
-                   struct.success.add(_elem1143);
+                   _elem1151 = iprot.readString();
+                   struct.success.add(_elem1151);
                  }
                  iprot.readListEnd();
                }
@@ -155335,9 +155467,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden
        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size()));
-         for (String _iter1145 : struct.success)
+         for (String _iter1153 : struct.success)
          {
-           oprot.writeString(_iter1145);
+           oprot.writeString(_iter1153);
          }
          oprot.writeListEnd();
        }
@@ -155368,9 +155500,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident
      if (struct.isSetSuccess()) {
        {
          oprot.writeI32(struct.success.size());
-         for (String _iter1146 : struct.success)
+         for (String _iter1154 : struct.success)
          {
-           oprot.writeString(_iter1146);
+           oprot.writeString(_iter1154);
          }
        }
      }
@@ -155382,13 +155514,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
-         org.apache.thrift.protocol.TList _list1147 = new
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1147.size); - String _elem1148; - for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149) + org.apache.thrift.protocol.TList _list1155 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1155.size); + String _elem1156; + for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) { - _elem1148 = iprot.readString(); - struct.success.add(_elem1148); + _elem1156 = iprot.readString(); + struct.success.add(_elem1156); } } struct.setSuccessIsSet(true); @@ -158418,13 +158550,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1150 = iprot.readListBegin(); - struct.success = new ArrayList(_list1150.size); - String _elem1151; - for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152) + org.apache.thrift.protocol.TList _list1158 = iprot.readListBegin(); + struct.success = new ArrayList(_list1158.size); + String _elem1159; + for (int _i1160 = 0; _i1160 < _list1158.size; ++_i1160) { - _elem1151 = iprot.readString(); - struct.success.add(_elem1151); + _elem1159 = iprot.readString(); + struct.success.add(_elem1159); } iprot.readListEnd(); } @@ -158450,9 +158582,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1153 : struct.success) + for (String _iter1161 : struct.success) { - oprot.writeString(_iter1153); + oprot.writeString(_iter1161); } oprot.writeListEnd(); } @@ -158483,9 +158615,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1154 : struct.success) + for (String _iter1162 : struct.success) { - oprot.writeString(_iter1154); + oprot.writeString(_iter1162); } } } @@ -158497,13 +158629,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1155 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1155.size); - String _elem1156; - for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) + org.apache.thrift.protocol.TList _list1163 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1163.size); + String _elem1164; + for (int _i1165 = 0; _i1165 < _list1163.size; ++_i1165) { - _elem1156 = iprot.readString(); - struct.success.add(_elem1156); + _elem1164 = iprot.readString(); + struct.success.add(_elem1164); } } struct.setSuccessIsSet(true); @@ -172471,33 +172603,695 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, 
metaDataMap); + } + + public flushCache_result() { + } + + /** + * Performs a deep copy on other. + */ + public flushCache_result(flushCache_result other) { + } + + public flushCache_result deepCopy() { + return new flushCache_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof flushCache_result) + return this.equals((flushCache_result)that); + return false; + } + + public boolean equals(flushCache_result that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(flushCache_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("flushCache_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { + public flushCache_resultStandardScheme getScheme() { + return new flushCache_resultStandardScheme(); + } + } + + private static class flushCache_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); 
+ } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { + public flushCache_resultTupleScheme getScheme() { + return new flushCache_resultTupleScheme(); + } + } + + private static class flushCache_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); + } + + private GetFileMetadataByExprRequest req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); + } + + public get_file_metadata_by_expr_args() { + } + + public get_file_metadata_by_expr_args( + GetFileMetadataByExprRequest req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. + */ + public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataByExprRequest(other.req); + } + } + + public get_file_metadata_by_expr_args deepCopy() { + return new get_file_metadata_by_expr_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public GetFileMetadataByExprRequest getReq() { + return this.req; + } + + public void setReq(GetFileMetadataByExprRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((GetFileMetadataByExprRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_file_metadata_by_expr_args) + return this.equals((get_file_metadata_by_expr_args)that); + return false; + } + + public boolean equals(get_file_metadata_by_expr_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + 
list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(get_file_metadata_by_expr_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsStandardScheme getScheme() { + return new get_file_metadata_by_expr_argsStandardScheme(); + } + } + + private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + 
oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsTupleScheme getScheme() { + return new get_file_metadata_by_expr_argsTupleScheme(); + } + } + + private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); + } + + private GetFileMetadataByExprResult success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); } - public flushCache_result() { + public get_file_metadata_by_expr_result() { + } + + public get_file_metadata_by_expr_result( + GetFileMetadataByExprResult success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. */ - public flushCache_result(flushCache_result other) { + public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { + if (other.isSetSuccess()) { + this.success = new GetFileMetadataByExprResult(other.success); + } } - public flushCache_result deepCopy() { - return new flushCache_result(this); + public get_file_metadata_by_expr_result deepCopy() { + return new get_file_metadata_by_expr_result(this); } @Override public void clear() { + this.success = null; + } + + public GetFileMetadataByExprResult getSuccess() { + return this.success; + } + + public void setSuccess(GetFileMetadataByExprResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((GetFileMetadataByExprResult)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + } throw new IllegalStateException(); } @@ -172509,6 +173303,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -172517,15 +173313,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof flushCache_result) - return this.equals((flushCache_result)that); + if (that instanceof get_file_metadata_by_expr_result) + return this.equals((get_file_metadata_by_expr_result)that); return false; } - public boolean equals(flushCache_result that) { + public boolean equals(get_file_metadata_by_expr_result that) { if (that == null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success 
&& that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -172533,17 +173338,32 @@ public boolean equals(flushCache_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + return list.hashCode(); } @Override - public int compareTo(flushCache_result other) { + public int compareTo(get_file_metadata_by_expr_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -172561,9 +173381,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("flushCache_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -172571,6 +173398,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -172589,15 +173419,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { - public flushCache_resultStandardScheme getScheme() { - return new flushCache_resultStandardScheme(); + private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultStandardScheme getScheme() { + return new get_file_metadata_by_expr_resultStandardScheme(); } } - private static class flushCache_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172607,6 +173437,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new GetFileMetadataByExprResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -172616,49 +173455,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { - public flushCache_resultTupleScheme getScheme() { - return new flushCache_resultTupleScheme(); + private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultTupleScheme getScheme() { + return new get_file_metadata_by_expr_resultTupleScheme(); } } - private static class flushCache_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new GetFileMetadataByExprResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } } } } - public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); + public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); } - private GetFileMetadataByExprRequest req; // required + private GetFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -172723,16 +173581,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); } - public get_file_metadata_by_expr_args() { + public get_file_metadata_args() { } - public get_file_metadata_by_expr_args( - GetFileMetadataByExprRequest req) + public get_file_metadata_args( + GetFileMetadataRequest req) { this(); this.req = req; @@ -172741,14 +173599,14 @@ public get_file_metadata_by_expr_args( /** * Performs a deep copy on other. */ - public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + public get_file_metadata_args(get_file_metadata_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataByExprRequest(other.req); + this.req = new GetFileMetadataRequest(other.req); } } - public get_file_metadata_by_expr_args deepCopy() { - return new get_file_metadata_by_expr_args(this); + public get_file_metadata_args deepCopy() { + return new get_file_metadata_args(this); } @Override @@ -172756,11 +173614,11 @@ public void clear() { this.req = null; } - public GetFileMetadataByExprRequest getReq() { + public GetFileMetadataRequest getReq() { return this.req; } - public void setReq(GetFileMetadataByExprRequest req) { + public void setReq(GetFileMetadataRequest req) { this.req = req; } @@ -172785,7 +173643,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataByExprRequest)value); + setReq((GetFileMetadataRequest)value); } break; @@ -172818,12 +173676,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_args) - return this.equals((get_file_metadata_by_expr_args)that); + if (that instanceof get_file_metadata_args) + return this.equals((get_file_metadata_args)that); return false; } - public boolean equals(get_file_metadata_by_expr_args that) { + public boolean equals(get_file_metadata_args that) { if (that == null) return false; @@ -172852,7 +173710,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_args other) { + public int compareTo(get_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -172886,7 +173744,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -172924,15 +173782,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsStandardScheme getScheme() { - return new get_file_metadata_by_expr_argsStandardScheme(); + private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_argsStandardScheme getScheme() { + return new get_file_metadata_argsStandardScheme(); } } - private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172944,7 +173802,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new GetFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -172960,7 +173818,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -172975,16 +173833,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsTupleScheme getScheme() { - return new get_file_metadata_by_expr_argsTupleScheme(); + private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_argsTupleScheme getScheme() { + return new get_file_metadata_argsTupleScheme(); } } - private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { + private static class get_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -172997,11 +173855,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new GetFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -173010,18 +173868,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); + public static class get_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory()); } - private GetFileMetadataByExprResult success; // required + private GetFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173086,16 +173944,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); } - public get_file_metadata_by_expr_result() { + public get_file_metadata_result() { } - public get_file_metadata_by_expr_result( - GetFileMetadataByExprResult success) + public get_file_metadata_result( + GetFileMetadataResult success) { this(); this.success = success; @@ -173104,14 +173962,14 @@ public get_file_metadata_by_expr_result( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { + public get_file_metadata_result(get_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataByExprResult(other.success); + this.success = new GetFileMetadataResult(other.success); } } - public get_file_metadata_by_expr_result deepCopy() { - return new get_file_metadata_by_expr_result(this); + public get_file_metadata_result deepCopy() { + return new get_file_metadata_result(this); } @Override @@ -173119,11 +173977,11 @@ public void clear() { this.success = null; } - public GetFileMetadataByExprResult getSuccess() { + public GetFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataByExprResult success) { + public void setSuccess(GetFileMetadataResult success) { this.success = success; } @@ -173148,7 +174006,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataByExprResult)value); + setSuccess((GetFileMetadataResult)value); } break; @@ -173181,12 +174039,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_result) - return this.equals((get_file_metadata_by_expr_result)that); + if (that instanceof get_file_metadata_result) + return this.equals((get_file_metadata_result)that); return false; } - public boolean equals(get_file_metadata_by_expr_result that) { + public boolean equals(get_file_metadata_result that) { if (that == null) return false; @@ -173215,7 +174073,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_result other) { + public int compareTo(get_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173249,7 +174107,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -173287,15 +174145,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultStandardScheme getScheme() { - return new get_file_metadata_by_expr_resultStandardScheme(); + private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_resultStandardScheme getScheme() { + return new get_file_metadata_resultStandardScheme(); } } - private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173307,7 +174165,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -173323,7 +174181,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -173338,16 +174196,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultTupleScheme getScheme() { - return new get_file_metadata_by_expr_resultTupleScheme(); + private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_resultTupleScheme getScheme() { + return new get_file_metadata_resultTupleScheme(); } } - private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -173360,11 +174218,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws 
org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -173373,18 +174231,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); + public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); } - private GetFileMetadataRequest req; // required + private PutFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173449,16 +174307,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); } - public get_file_metadata_args() { + public put_file_metadata_args() { } - public get_file_metadata_args( - GetFileMetadataRequest req) + public put_file_metadata_args( + PutFileMetadataRequest req) { this(); this.req = req; @@ -173467,14 +174325,14 @@ public get_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_args(get_file_metadata_args other) { + public put_file_metadata_args(put_file_metadata_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataRequest(other.req); + this.req = new PutFileMetadataRequest(other.req); } } - public get_file_metadata_args deepCopy() { - return new get_file_metadata_args(this); + public put_file_metadata_args deepCopy() { + return new put_file_metadata_args(this); } @Override @@ -173482,11 +174340,11 @@ public void clear() { this.req = null; } - public GetFileMetadataRequest getReq() { + public PutFileMetadataRequest getReq() { return this.req; } - public void setReq(GetFileMetadataRequest req) { + public void setReq(PutFileMetadataRequest req) { this.req = req; } @@ -173511,7 +174369,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataRequest)value); + setReq((PutFileMetadataRequest)value); } break; @@ -173544,12 +174402,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_args) - return this.equals((get_file_metadata_args)that); + if (that instanceof put_file_metadata_args) + return this.equals((put_file_metadata_args)that); return false; } - public boolean equals(get_file_metadata_args that) { + public boolean equals(put_file_metadata_args that) { if (that == null) return false; @@ -173578,7 +174436,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_args other) { + public int compareTo(put_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173612,7 +174470,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
    @Override
    public String toString() {
-      StringBuilder sb = new StringBuilder("get_file_metadata_args(");
+      StringBuilder sb = new StringBuilder("put_file_metadata_args(");
      boolean first = true;

      sb.append("req:");
@@ -173650,15 +174508,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
      }
    }

-    private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory {
-      public get_file_metadata_argsStandardScheme getScheme() {
-        return new get_file_metadata_argsStandardScheme();
+    private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory {
+      public put_file_metadata_argsStandardScheme getScheme() {
+        return new put_file_metadata_argsStandardScheme();
      }
    }

-    private static class get_file_metadata_argsStandardScheme extends StandardScheme<get_file_metadata_args> {
+    private static class put_file_metadata_argsStandardScheme extends StandardScheme<put_file_metadata_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -173670,7 +174528,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a
          switch (schemeField.id) {
            case 1: // REQ
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.req = new GetFileMetadataRequest();
+                struct.req = new PutFileMetadataRequest();
                struct.req.read(iprot);
                struct.setReqIsSet(true);
              } else {
@@ -173686,7 +174544,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -173701,16 +174559,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_

    }

-    private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory {
-      public get_file_metadata_argsTupleScheme getScheme() {
-        return new get_file_metadata_argsTupleScheme();
+    private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory {
+      public put_file_metadata_argsTupleScheme getScheme() {
+        return new put_file_metadata_argsTupleScheme();
      }
    }

-    private static class get_file_metadata_argsTupleScheme extends TupleScheme<get_file_metadata_args> {
+    private static class put_file_metadata_argsTupleScheme extends TupleScheme<put_file_metadata_args> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetReq()) {
@@ -173723,11 +174581,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_a
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.req = new GetFileMetadataRequest();
+          struct.req = new PutFileMetadataRequest();
          struct.req.read(iprot);
          struct.setReqIsSet(true);
        }
@@ -173736,18 +174594,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_ar
  }

-  public static class get_file_metadata_result implements org.apache.thrift.TBase<get_file_metadata_result, get_file_metadata_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_file_metadata_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result");
+  public static class put_file_metadata_result implements org.apache.thrift.TBase<put_file_metadata_result, put_file_metadata_result._Fields>, java.io.Serializable, Cloneable, Comparable<put_file_metadata_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result");

    private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);

    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    static {
-      schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory());
    }

-    private GetFileMetadataResult success; // required
+    private PutFileMetadataResult success; // required

    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -173812,16 +174670,16 @@ public String getFieldName() {
    static {
      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
      tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class)));
      metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap);
    }

-    public get_file_metadata_result() {
+    public put_file_metadata_result() {
    }

-    public get_file_metadata_result(
-      GetFileMetadataResult success)
+    public put_file_metadata_result(
+      PutFileMetadataResult success)
    {
      this();
      this.success = success;
@@ -173830,14 +174688,14 @@ public get_file_metadata_result(
    /**
     * Performs a deep copy on other.
*/ - public get_file_metadata_result(get_file_metadata_result other) { + public put_file_metadata_result(put_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataResult(other.success); + this.success = new PutFileMetadataResult(other.success); } } - public get_file_metadata_result deepCopy() { - return new get_file_metadata_result(this); + public put_file_metadata_result deepCopy() { + return new put_file_metadata_result(this); } @Override @@ -173845,11 +174703,11 @@ public void clear() { this.success = null; } - public GetFileMetadataResult getSuccess() { + public PutFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataResult success) { + public void setSuccess(PutFileMetadataResult success) { this.success = success; } @@ -173874,7 +174732,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataResult)value); + setSuccess((PutFileMetadataResult)value); } break; @@ -173907,12 +174765,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_result) - return this.equals((get_file_metadata_result)that); + if (that instanceof put_file_metadata_result) + return this.equals((put_file_metadata_result)that); return false; } - public boolean equals(get_file_metadata_result that) { + public boolean equals(put_file_metadata_result that) { if (that == null) return false; @@ -173941,7 +174799,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_result other) { + public int compareTo(put_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173975,7 +174833,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_result("); + StringBuilder sb = new StringBuilder("put_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -174013,15 +174871,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_resultStandardScheme getScheme() { - return new get_file_metadata_resultStandardScheme(); + private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_resultStandardScheme getScheme() { + return new put_file_metadata_resultStandardScheme(); } } - private static class get_file_metadata_resultStandardScheme extends StandardScheme { + private static class put_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174033,7 +174891,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataResult(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174049,7 +174907,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174064,16 +174922,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_resultTupleScheme getScheme() { - return new get_file_metadata_resultTupleScheme(); + private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_resultTupleScheme getScheme() { + return new put_file_metadata_resultTupleScheme(); } } - private static class get_file_metadata_resultTupleScheme extends TupleScheme { + private static class put_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174086,11 +174944,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataResult(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174099,18 +174957,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_re } - public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); + public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); } - private PutFileMetadataRequest req; // required + private ClearFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174175,16 +175033,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); } - public put_file_metadata_args() { + public clear_file_metadata_args() { } - public put_file_metadata_args( - PutFileMetadataRequest req) + public clear_file_metadata_args( + ClearFileMetadataRequest req) { this(); this.req = req; @@ -174193,14 +175051,14 @@ public put_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_args(put_file_metadata_args other) { + public clear_file_metadata_args(clear_file_metadata_args other) { if (other.isSetReq()) { - this.req = new PutFileMetadataRequest(other.req); + this.req = new ClearFileMetadataRequest(other.req); } } - public put_file_metadata_args deepCopy() { - return new put_file_metadata_args(this); + public clear_file_metadata_args deepCopy() { + return new clear_file_metadata_args(this); } @Override @@ -174208,11 +175066,11 @@ public void clear() { this.req = null; } - public PutFileMetadataRequest getReq() { + public ClearFileMetadataRequest getReq() { return this.req; } - public void setReq(PutFileMetadataRequest req) { + public void setReq(ClearFileMetadataRequest req) { this.req = req; } @@ -174237,7 +175095,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((PutFileMetadataRequest)value); + setReq((ClearFileMetadataRequest)value); } break; @@ -174270,12 +175128,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_args) - return this.equals((put_file_metadata_args)that); + if (that instanceof clear_file_metadata_args) + return this.equals((clear_file_metadata_args)that); return false; } - public boolean equals(put_file_metadata_args that) { + public boolean equals(clear_file_metadata_args that) { if (that == null) return false; @@ -174304,7 +175162,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_args other) { + public int compareTo(clear_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174338,7 +175196,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_args("); + StringBuilder sb = new StringBuilder("clear_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -174376,15 +175234,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_argsStandardScheme getScheme() { - return new put_file_metadata_argsStandardScheme(); + private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsStandardScheme getScheme() { + return new clear_file_metadata_argsStandardScheme(); } } - private static class put_file_metadata_argsStandardScheme extends StandardScheme { + private static class clear_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174396,7 +175254,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new PutFileMetadataRequest(); + struct.req = new ClearFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -174412,7 +175270,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174427,16 +175285,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_argsTupleScheme getScheme() { - return new put_file_metadata_argsTupleScheme(); + private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsTupleScheme getScheme() { + return new clear_file_metadata_argsTupleScheme(); } } - private static class put_file_metadata_argsTupleScheme extends TupleScheme { + private static class clear_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -174449,11 +175307,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot 
= (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new PutFileMetadataRequest(); + struct.req = new ClearFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -174462,18 +175320,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_ar } - public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); + public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); } - private PutFileMetadataResult success; // required + private ClearFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174538,16 +175396,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); } - public put_file_metadata_result() { + public clear_file_metadata_result() { } - public put_file_metadata_result( - PutFileMetadataResult success) + public clear_file_metadata_result( + ClearFileMetadataResult success) { this(); this.success = success; @@ -174556,14 +175414,14 @@ public put_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_result(put_file_metadata_result other) { + public clear_file_metadata_result(clear_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new PutFileMetadataResult(other.success); + this.success = new ClearFileMetadataResult(other.success); } } - public put_file_metadata_result deepCopy() { - return new put_file_metadata_result(this); + public clear_file_metadata_result deepCopy() { + return new clear_file_metadata_result(this); } @Override @@ -174571,11 +175429,11 @@ public void clear() { this.success = null; } - public PutFileMetadataResult getSuccess() { + public ClearFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(PutFileMetadataResult success) { + public void setSuccess(ClearFileMetadataResult success) { this.success = success; } @@ -174600,7 +175458,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((PutFileMetadataResult)value); + setSuccess((ClearFileMetadataResult)value); } break; @@ -174633,12 +175491,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_result) - return this.equals((put_file_metadata_result)that); + if (that instanceof clear_file_metadata_result) + return this.equals((clear_file_metadata_result)that); return false; } - public boolean equals(put_file_metadata_result that) { + public boolean equals(clear_file_metadata_result that) { if (that == null) return false; @@ -174667,7 +175525,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_result other) { + public int compareTo(clear_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174701,7 +175559,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_result("); + StringBuilder sb = new StringBuilder("clear_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -174739,15 +175597,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_resultStandardScheme getScheme() { - return new put_file_metadata_resultStandardScheme(); + private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultStandardScheme getScheme() { + return new clear_file_metadata_resultStandardScheme(); } } - private static class put_file_metadata_resultStandardScheme extends StandardScheme { + private static class clear_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174759,7 +175617,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new PutFileMetadataResult(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174775,7 +175633,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174790,16 +175648,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_resultTupleScheme getScheme() { - return new put_file_metadata_resultTupleScheme(); + private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultTupleScheme getScheme() { + return new clear_file_metadata_resultTupleScheme(); } } - private static class put_file_metadata_resultTupleScheme extends TupleScheme { + private static class clear_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174812,11 +175670,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, 
clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new PutFileMetadataResult(); + struct.success = new ClearFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174825,18 +175683,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_re } - public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); + public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); } - private ClearFileMetadataRequest req; // required + private CacheFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174901,16 +175759,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); } - public clear_file_metadata_args() { + public cache_file_metadata_args() { } - public clear_file_metadata_args( - ClearFileMetadataRequest req) + public cache_file_metadata_args( + CacheFileMetadataRequest req) { this(); this.req = req; @@ -174919,14 +175777,14 @@ public clear_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_args(clear_file_metadata_args other) { + public cache_file_metadata_args(cache_file_metadata_args other) { if (other.isSetReq()) { - this.req = new ClearFileMetadataRequest(other.req); + this.req = new CacheFileMetadataRequest(other.req); } } - public clear_file_metadata_args deepCopy() { - return new clear_file_metadata_args(this); + public cache_file_metadata_args deepCopy() { + return new cache_file_metadata_args(this); } @Override @@ -174934,11 +175792,11 @@ public void clear() { this.req = null; } - public ClearFileMetadataRequest getReq() { + public CacheFileMetadataRequest getReq() { return this.req; } - public void setReq(ClearFileMetadataRequest req) { + public void setReq(CacheFileMetadataRequest req) { this.req = req; } @@ -174963,7 +175821,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((ClearFileMetadataRequest)value); + setReq((CacheFileMetadataRequest)value); } break; @@ -174996,12 +175854,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_args) - return this.equals((clear_file_metadata_args)that); + if (that instanceof cache_file_metadata_args) + return this.equals((cache_file_metadata_args)that); return false; } - public boolean equals(clear_file_metadata_args that) { + public boolean equals(cache_file_metadata_args that) { if (that == null) return false; @@ -175030,7 +175888,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_args other) { + public int compareTo(cache_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175064,7 +175922,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_args("); + StringBuilder sb = new StringBuilder("cache_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -175102,15 +175960,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsStandardScheme getScheme() { - return new clear_file_metadata_argsStandardScheme(); + private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsStandardScheme getScheme() { + return new cache_file_metadata_argsStandardScheme(); } } - private static class clear_file_metadata_argsStandardScheme extends StandardScheme { + private static class cache_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175122,7 +175980,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -175138,7 +175996,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175153,16 +176011,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsTupleScheme getScheme() { - return new clear_file_metadata_argsTupleScheme(); + private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsTupleScheme getScheme() { + return new cache_file_metadata_argsTupleScheme(); } } - private static class clear_file_metadata_argsTupleScheme extends TupleScheme { + private static class cache_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -175175,11 +176033,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws 
org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -175188,18 +176046,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); + public static class cache_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); } - private ClearFileMetadataResult success; // required + private CacheFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175264,16 +176122,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); } - public clear_file_metadata_result() { + public cache_file_metadata_result() { } - public clear_file_metadata_result( - ClearFileMetadataResult success) + public cache_file_metadata_result( + CacheFileMetadataResult success) { this(); this.success = success; @@ -175282,14 +176140,14 @@ public clear_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_result(clear_file_metadata_result other) { + public cache_file_metadata_result(cache_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new ClearFileMetadataResult(other.success); + this.success = new CacheFileMetadataResult(other.success); } } - public clear_file_metadata_result deepCopy() { - return new clear_file_metadata_result(this); + public cache_file_metadata_result deepCopy() { + return new cache_file_metadata_result(this); } @Override @@ -175297,11 +176155,11 @@ public void clear() { this.success = null; } - public ClearFileMetadataResult getSuccess() { + public CacheFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(ClearFileMetadataResult success) { + public void setSuccess(CacheFileMetadataResult success) { this.success = success; } @@ -175326,7 +176184,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((ClearFileMetadataResult)value); + setSuccess((CacheFileMetadataResult)value); } break; @@ -175359,12 +176217,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_result) - return this.equals((clear_file_metadata_result)that); + if (that instanceof cache_file_metadata_result) + return this.equals((cache_file_metadata_result)that); return false; } - public boolean equals(clear_file_metadata_result that) { + public boolean equals(cache_file_metadata_result that) { if (that == null) return false; @@ -175393,7 +176251,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_result other) { + public int compareTo(cache_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175427,7 +176285,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_result("); + StringBuilder sb = new StringBuilder("cache_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -175465,15 +176323,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultStandardScheme getScheme() { - return new clear_file_metadata_resultStandardScheme(); + private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultStandardScheme getScheme() { + return new cache_file_metadata_resultStandardScheme(); } } - private static class clear_file_metadata_resultStandardScheme extends StandardScheme { + private static class cache_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175485,7 +176343,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ClearFileMetadataResult(); + struct.success = new CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -175501,7 +176359,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175516,16 +176374,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultTupleScheme getScheme() { - return new clear_file_metadata_resultTupleScheme(); + private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultTupleScheme getScheme() { + return new cache_file_metadata_resultTupleScheme(); } } - private static class clear_file_metadata_resultTupleScheme extends TupleScheme { + private static class cache_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -175538,11 +176396,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void 
read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new ClearFileMetadataResult(); + struct.success = new CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -175551,18 +176409,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); + public static class get_next_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_argsTupleSchemeFactory()); } - private CacheFileMetadataRequest req; // required + private GetNextWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175627,16 +176485,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_args.class, metaDataMap); } - public cache_file_metadata_args() { + public get_next_write_id_args() { } - public cache_file_metadata_args( - CacheFileMetadataRequest req) + public get_next_write_id_args( + GetNextWriteIdRequest req) { this(); this.req = req; @@ -175645,14 +176503,14 @@ public cache_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public cache_file_metadata_args(cache_file_metadata_args other) { + public get_next_write_id_args(get_next_write_id_args other) { if (other.isSetReq()) { - this.req = new CacheFileMetadataRequest(other.req); + this.req = new GetNextWriteIdRequest(other.req); } } - public cache_file_metadata_args deepCopy() { - return new cache_file_metadata_args(this); + public get_next_write_id_args deepCopy() { + return new get_next_write_id_args(this); } @Override @@ -175660,11 +176518,11 @@ public void clear() { this.req = null; } - public CacheFileMetadataRequest getReq() { + public GetNextWriteIdRequest getReq() { return this.req; } - public void setReq(CacheFileMetadataRequest req) { + public void setReq(GetNextWriteIdRequest req) { this.req = req; } @@ -175689,7 +176547,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((CacheFileMetadataRequest)value); + setReq((GetNextWriteIdRequest)value); } break; @@ -175722,12 +176580,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof cache_file_metadata_args) - return this.equals((cache_file_metadata_args)that); + if (that instanceof get_next_write_id_args) + return this.equals((get_next_write_id_args)that); return false; } - public boolean equals(cache_file_metadata_args that) { + public boolean equals(get_next_write_id_args that) { if (that == null) return false; @@ -175756,7 +176614,7 @@ public int hashCode() { } @Override - public int compareTo(cache_file_metadata_args other) { + public int compareTo(get_next_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175790,7 +176648,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("cache_file_metadata_args("); + StringBuilder sb = new StringBuilder("get_next_write_id_args("); boolean first = true; sb.append("req:"); @@ -175828,15 +176686,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsStandardScheme getScheme() { - return new cache_file_metadata_argsStandardScheme(); + private static class get_next_write_id_argsStandardSchemeFactory implements SchemeFactory { + public get_next_write_id_argsStandardScheme getScheme() { + return new get_next_write_id_argsStandardScheme(); } } - private static class cache_file_metadata_argsStandardScheme extends StandardScheme { + private static class get_next_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175848,7 +176706,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -175864,7 +176722,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175879,16 +176737,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat } - private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsTupleScheme getScheme() { - return new cache_file_metadata_argsTupleScheme(); + private static class get_next_write_id_argsTupleSchemeFactory implements SchemeFactory { + public get_next_write_id_argsTupleScheme getScheme() { + return new get_next_write_id_argsTupleScheme(); } } - private static class cache_file_metadata_argsTupleScheme extends TupleScheme { + private static class get_next_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -175901,11 +176759,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot 
= (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -175914,18 +176772,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_ } - public static class cache_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); + public static class get_next_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_resultTupleSchemeFactory()); } - private CacheFileMetadataResult success; // required + private GetNextWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175990,16 +176848,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_result.class, metaDataMap); } - public cache_file_metadata_result() { + public get_next_write_id_result() { } - public cache_file_metadata_result( - CacheFileMetadataResult success) + public get_next_write_id_result( + GetNextWriteIdResult success) { this(); this.success = success; @@ -176008,14 +176866,14 @@ public cache_file_metadata_result( /** * Performs a deep copy on other. 
   */
-  public cache_file_metadata_result(cache_file_metadata_result other) {
+  public get_next_write_id_result(get_next_write_id_result other) {
     if (other.isSetSuccess()) {
-      this.success = new CacheFileMetadataResult(other.success);
+      this.success = new GetNextWriteIdResult(other.success);
     }
   }

-  public cache_file_metadata_result deepCopy() {
-    return new cache_file_metadata_result(this);
+  public get_next_write_id_result deepCopy() {
+    return new get_next_write_id_result(this);
   }

   @Override
@@ -176023,11 +176881,11 @@ public void clear() {
     this.success = null;
   }

-  public CacheFileMetadataResult getSuccess() {
+  public GetNextWriteIdResult getSuccess() {
     return this.success;
   }

-  public void setSuccess(CacheFileMetadataResult success) {
+  public void setSuccess(GetNextWriteIdResult success) {
     this.success = success;
   }

@@ -176052,7 +176910,7 @@ public void setFieldValue(_Fields field, Object value) {
       if (value == null) {
         unsetSuccess();
       } else {
-        setSuccess((CacheFileMetadataResult)value);
+        setSuccess((GetNextWriteIdResult)value);
       }
       break;

@@ -176085,12 +176943,12 @@ public boolean isSet(_Fields field) {
   public boolean equals(Object that) {
     if (that == null)
       return false;
-    if (that instanceof cache_file_metadata_result)
-      return this.equals((cache_file_metadata_result)that);
+    if (that instanceof get_next_write_id_result)
+      return this.equals((get_next_write_id_result)that);
     return false;
   }

-  public boolean equals(cache_file_metadata_result that) {
+  public boolean equals(get_next_write_id_result that) {
     if (that == null)
       return false;

@@ -176119,7 +176977,7 @@ public int hashCode() {
   }

   @Override
-  public int compareTo(cache_file_metadata_result other) {
+  public int compareTo(get_next_write_id_result other) {
     if (!getClass().equals(other.getClass())) {
       return getClass().getName().compareTo(other.getClass().getName());
     }

@@ -176153,7 +177011,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
   @Override
   public String toString() {
-    StringBuilder sb = new StringBuilder("cache_file_metadata_result(");
+    StringBuilder sb = new StringBuilder("get_next_write_id_result(");
     boolean first = true;

     sb.append("success:");
@@ -176191,15 +177049,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
     }
   }

-  private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory {
-    public cache_file_metadata_resultStandardScheme getScheme() {
-      return new cache_file_metadata_resultStandardScheme();
+  private static class get_next_write_id_resultStandardSchemeFactory implements SchemeFactory {
+    public get_next_write_id_resultStandardScheme getScheme() {
+      return new get_next_write_id_resultStandardScheme();
     }
   }

-  private static class cache_file_metadata_resultStandardScheme extends StandardScheme<cache_file_metadata_result> {
+  private static class get_next_write_id_resultStandardScheme extends StandardScheme<get_next_write_id_result> {

-    public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_result struct) throws org.apache.thrift.TException {
       org.apache.thrift.protocol.TField schemeField;
       iprot.readStructBegin();
       while (true)
@@ -176211,7 +177069,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata
         switch (schemeField.id) {
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-              struct.success = new CacheFileMetadataResult();
+              struct.success = new GetNextWriteIdResult();
               struct.success.read(iprot);
               struct.setSuccessIsSet(true);
             } else {
@@ -176227,7 +177085,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata
       struct.validate();
     }

-    public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_result struct) throws org.apache.thrift.TException {
       struct.validate();

       oprot.writeStructBegin(STRUCT_DESC);
@@ -176242,16 +177100,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat
     }

   }

-  private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory {
-    public cache_file_metadata_resultTupleScheme getScheme() {
-      return new cache_file_metadata_resultTupleScheme();
+  private static class get_next_write_id_resultTupleSchemeFactory implements SchemeFactory {
+    public get_next_write_id_resultTupleScheme getScheme() {
+      return new get_next_write_id_resultTupleScheme();
     }
   }

-  private static class cache_file_metadata_resultTupleScheme extends TupleScheme<cache_file_metadata_result> {
+  private static class get_next_write_id_resultTupleScheme extends TupleScheme<get_next_write_id_result> {

     @Override
-    public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException {
+    public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException {
       TTupleProtocol oprot = (TTupleProtocol) prot;
       BitSet optionals = new BitSet();
       if (struct.isSetSuccess()) {
@@ -176264,11 +177122,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata
     }

     @Override
-    public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException {
+    public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException {
       TTupleProtocol iprot = (TTupleProtocol) prot;
       BitSet incoming = iprot.readBitSet(1);
       if (incoming.get(0)) {
-        struct.success = new CacheFileMetadataResult();
+        struct.success = new GetNextWriteIdResult();
         struct.success.read(iprot);
         struct.setSuccessIsSet(true);
       }
@@ -176277,18 +177135,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_
   }

-  public static class get_next_write_id_args implements org.apache.thrift.TBase<get_next_write_id_args, get_next_write_id_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_write_id_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_args");
+  public static class finalize_write_id_args implements org.apache.thrift.TBase<finalize_write_id_args, finalize_write_id_args._Fields>, java.io.Serializable, Cloneable, Comparable<finalize_write_id_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_args");

     private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_next_write_id_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_next_write_id_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new finalize_write_id_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new finalize_write_id_argsTupleSchemeFactory());
     }

-    private GetNextWriteIdRequest req; // required
+    private FinalizeWriteIdRequest req; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -176353,16 +177211,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_args.class, metaDataMap);
     }

-    public get_next_write_id_args() {
+    public finalize_write_id_args() {
     }

-    public get_next_write_id_args(
-      GetNextWriteIdRequest req)
+    public finalize_write_id_args(
+      FinalizeWriteIdRequest req)
     {
       this();
       this.req = req;
@@ -176371,14 +177229,14 @@ public get_next_write_id_args(
     /**
      * Performs a deep copy on other.
      */
-    public get_next_write_id_args(get_next_write_id_args other) {
+    public finalize_write_id_args(finalize_write_id_args other) {
       if (other.isSetReq()) {
-        this.req = new GetNextWriteIdRequest(other.req);
+        this.req = new FinalizeWriteIdRequest(other.req);
       }
     }

-    public get_next_write_id_args deepCopy() {
-      return new get_next_write_id_args(this);
+    public finalize_write_id_args deepCopy() {
+      return new finalize_write_id_args(this);
     }

     @Override
@@ -176386,11 +177244,11 @@ public void clear() {
       this.req = null;
     }

-    public GetNextWriteIdRequest getReq() {
+    public FinalizeWriteIdRequest getReq() {
       return this.req;
     }

-    public void setReq(GetNextWriteIdRequest req) {
+    public void setReq(FinalizeWriteIdRequest req) {
       this.req = req;
     }

@@ -176415,7 +177273,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetReq();
         } else {
-          setReq((GetNextWriteIdRequest)value);
+          setReq((FinalizeWriteIdRequest)value);
         }
         break;

@@ -176448,12 +177306,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_next_write_id_args)
-        return this.equals((get_next_write_id_args)that);
+      if (that instanceof finalize_write_id_args)
+        return this.equals((finalize_write_id_args)that);
       return false;
     }

-    public boolean equals(get_next_write_id_args that) {
+    public boolean equals(finalize_write_id_args that) {
       if (that == null)
         return false;

@@ -176482,7 +177340,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_next_write_id_args other) {
+    public int compareTo(finalize_write_id_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }

@@ -176516,7 +177374,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_next_write_id_args(");
+      StringBuilder sb = new StringBuilder("finalize_write_id_args(");
       boolean first = true;

       sb.append("req:");
@@ -176554,15 +177412,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_next_write_id_argsStandardSchemeFactory implements SchemeFactory {
-      public get_next_write_id_argsStandardScheme getScheme() {
-        return new get_next_write_id_argsStandardScheme();
+    private static class finalize_write_id_argsStandardSchemeFactory implements SchemeFactory {
+      public finalize_write_id_argsStandardScheme getScheme() {
+        return new finalize_write_id_argsStandardScheme();
       }
     }

-    private static class get_next_write_id_argsStandardScheme extends StandardScheme<get_next_write_id_args> {
+    private static class finalize_write_id_argsStandardScheme extends StandardScheme<finalize_write_id_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -176574,7 +177432,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_a
          switch (schemeField.id) {
            case 1: // REQ
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.req = new GetNextWriteIdRequest();
+                struct.req = new FinalizeWriteIdRequest();
                struct.req.read(iprot);
                struct.setReqIsSet(true);
              } else {
@@ -176590,7 +177448,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_a
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_args struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -176605,16 +177463,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_
      }

    }

-    private static class get_next_write_id_argsTupleSchemeFactory implements SchemeFactory {
-      public get_next_write_id_argsTupleScheme getScheme() {
-        return new get_next_write_id_argsTupleScheme();
+    private static class finalize_write_id_argsTupleSchemeFactory implements SchemeFactory {
+      public finalize_write_id_argsTupleScheme getScheme() {
+        return new finalize_write_id_argsTupleScheme();
      }
    }

-    private static class get_next_write_id_argsTupleScheme extends TupleScheme<get_next_write_id_args> {
+    private static class finalize_write_id_argsTupleScheme extends TupleScheme<finalize_write_id_args> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetReq()) {
@@ -176627,11 +177485,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_a
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.req = new GetNextWriteIdRequest();
+          struct.req = new FinalizeWriteIdRequest();
          struct.req.read(iprot);
          struct.setReqIsSet(true);
        }
@@ -176640,18 +177498,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_ar
   }

-  public static class get_next_write_id_result implements org.apache.thrift.TBase<get_next_write_id_result, get_next_write_id_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_next_write_id_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_result");
+  public static class finalize_write_id_result implements org.apache.thrift.TBase<finalize_write_id_result, finalize_write_id_result._Fields>, java.io.Serializable, Cloneable, Comparable<finalize_write_id_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_result");

     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new get_next_write_id_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new get_next_write_id_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new finalize_write_id_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new finalize_write_id_resultTupleSchemeFactory());
     }

-    private GetNextWriteIdResult success; // required
+    private FinalizeWriteIdResult success; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -176716,16 +177574,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdResult.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdResult.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_result.class, metaDataMap);
     }

-    public get_next_write_id_result() {
+    public finalize_write_id_result() {
     }

-    public get_next_write_id_result(
-      GetNextWriteIdResult success)
+    public finalize_write_id_result(
+      FinalizeWriteIdResult success)
     {
       this();
       this.success = success;
@@ -176734,14 +177592,14 @@ public get_next_write_id_result(
     /**
      * Performs a deep copy on other.
      */
-    public get_next_write_id_result(get_next_write_id_result other) {
+    public finalize_write_id_result(finalize_write_id_result other) {
       if (other.isSetSuccess()) {
-        this.success = new GetNextWriteIdResult(other.success);
+        this.success = new FinalizeWriteIdResult(other.success);
       }
     }

-    public get_next_write_id_result deepCopy() {
-      return new get_next_write_id_result(this);
+    public finalize_write_id_result deepCopy() {
+      return new finalize_write_id_result(this);
     }

     @Override
@@ -176749,11 +177607,11 @@ public void clear() {
       this.success = null;
     }

-    public GetNextWriteIdResult getSuccess() {
+    public FinalizeWriteIdResult getSuccess() {
       return this.success;
     }

-    public void setSuccess(GetNextWriteIdResult success) {
+    public void setSuccess(FinalizeWriteIdResult success) {
       this.success = success;
     }

@@ -176778,7 +177636,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((GetNextWriteIdResult)value);
+          setSuccess((FinalizeWriteIdResult)value);
         }
         break;

@@ -176811,12 +177669,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof get_next_write_id_result)
-        return this.equals((get_next_write_id_result)that);
+      if (that instanceof finalize_write_id_result)
+        return this.equals((finalize_write_id_result)that);
       return false;
     }

-    public boolean equals(get_next_write_id_result that) {
+    public boolean equals(finalize_write_id_result that) {
       if (that == null)
         return false;

@@ -176845,7 +177703,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(get_next_write_id_result other) {
+    public int compareTo(finalize_write_id_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }

@@ -176879,7 +177737,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("get_next_write_id_result(");
+      StringBuilder sb = new StringBuilder("finalize_write_id_result(");
       boolean first = true;

       sb.append("success:");
@@ -176917,15 +177775,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class get_next_write_id_resultStandardSchemeFactory implements SchemeFactory {
-      public get_next_write_id_resultStandardScheme getScheme() {
-        return new get_next_write_id_resultStandardScheme();
+    private static class finalize_write_id_resultStandardSchemeFactory implements SchemeFactory {
+      public finalize_write_id_resultStandardScheme getScheme() {
+        return new finalize_write_id_resultStandardScheme();
       }
     }

-    private static class get_next_write_id_resultStandardScheme extends StandardScheme<get_next_write_id_result> {
+    private static class finalize_write_id_resultStandardScheme extends StandardScheme<finalize_write_id_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_result struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -176937,7 +177795,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_r
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new GetNextWriteIdResult();
+                struct.success = new FinalizeWriteIdResult();
                struct.success.read(iprot);
                struct.setSuccessIsSet(true);
              } else {
@@ -176953,7 +177811,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_r
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_result struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -176968,16 +177826,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_
      }

    }

-    private static class get_next_write_id_resultTupleSchemeFactory implements SchemeFactory {
-      public get_next_write_id_resultTupleScheme getScheme() {
-        return new get_next_write_id_resultTupleScheme();
+    private static class finalize_write_id_resultTupleSchemeFactory implements SchemeFactory {
+      public finalize_write_id_resultTupleScheme getScheme() {
+        return new finalize_write_id_resultTupleScheme();
      }
    }

-    private static class get_next_write_id_resultTupleScheme extends TupleScheme<get_next_write_id_result> {
+    private static class finalize_write_id_resultTupleScheme extends TupleScheme<finalize_write_id_result> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
@@ -176990,11 +177848,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_r
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.success = new GetNextWriteIdResult();
+          struct.success = new FinalizeWriteIdResult();
          struct.success.read(iprot);
          struct.setSuccessIsSet(true);
        }
@@ -177003,18 +177861,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_re
   }

-  public static class finalize_write_id_args implements org.apache.thrift.TBase<finalize_write_id_args, finalize_write_id_args._Fields>, java.io.Serializable, Cloneable, Comparable<finalize_write_id_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_args");
+  public static class heartbeat_write_id_args implements org.apache.thrift.TBase<heartbeat_write_id_args, heartbeat_write_id_args._Fields>, java.io.Serializable, Cloneable, Comparable<heartbeat_write_id_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_args");

     private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new finalize_write_id_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new finalize_write_id_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new heartbeat_write_id_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new heartbeat_write_id_argsTupleSchemeFactory());
     }

-    private FinalizeWriteIdRequest req; // required
+    private HeartbeatWriteIdRequest req; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -177079,16 +177937,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_args.class, metaDataMap);
     }

-    public finalize_write_id_args() {
+    public heartbeat_write_id_args() {
     }

-    public finalize_write_id_args(
-      FinalizeWriteIdRequest req)
+    public heartbeat_write_id_args(
+      HeartbeatWriteIdRequest req)
     {
       this();
       this.req = req;
@@ -177097,14 +177955,14 @@ public finalize_write_id_args(
     /**
      * Performs a deep copy on other.
*/ - public finalize_write_id_args(finalize_write_id_args other) { + public heartbeat_write_id_args(heartbeat_write_id_args other) { if (other.isSetReq()) { - this.req = new FinalizeWriteIdRequest(other.req); + this.req = new HeartbeatWriteIdRequest(other.req); } } - public finalize_write_id_args deepCopy() { - return new finalize_write_id_args(this); + public heartbeat_write_id_args deepCopy() { + return new heartbeat_write_id_args(this); } @Override @@ -177112,11 +177970,11 @@ public void clear() { this.req = null; } - public FinalizeWriteIdRequest getReq() { + public HeartbeatWriteIdRequest getReq() { return this.req; } - public void setReq(FinalizeWriteIdRequest req) { + public void setReq(HeartbeatWriteIdRequest req) { this.req = req; } @@ -177141,7 +177999,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((FinalizeWriteIdRequest)value); + setReq((HeartbeatWriteIdRequest)value); } break; @@ -177174,12 +178032,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof finalize_write_id_args) - return this.equals((finalize_write_id_args)that); + if (that instanceof heartbeat_write_id_args) + return this.equals((heartbeat_write_id_args)that); return false; } - public boolean equals(finalize_write_id_args that) { + public boolean equals(heartbeat_write_id_args that) { if (that == null) return false; @@ -177208,7 +178066,7 @@ public int hashCode() { } @Override - public int compareTo(finalize_write_id_args other) { + public int compareTo(heartbeat_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -177242,7 +178100,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("finalize_write_id_args("); + StringBuilder sb = new StringBuilder("heartbeat_write_id_args("); boolean first = true; sb.append("req:"); @@ -177280,15 +178138,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class finalize_write_id_argsStandardSchemeFactory implements SchemeFactory { - public finalize_write_id_argsStandardScheme getScheme() { - return new finalize_write_id_argsStandardScheme(); + private static class heartbeat_write_id_argsStandardSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsStandardScheme getScheme() { + return new heartbeat_write_id_argsStandardScheme(); } } - private static class finalize_write_id_argsStandardScheme extends StandardScheme { + private static class heartbeat_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177300,7 +178158,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new FinalizeWriteIdRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -177316,7 +178174,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177331,16 +178189,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_ } - private static class finalize_write_id_argsTupleSchemeFactory implements SchemeFactory { - public finalize_write_id_argsTupleScheme getScheme() { - return new finalize_write_id_argsTupleScheme(); + private static class heartbeat_write_id_argsTupleSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsTupleScheme getScheme() { + return new heartbeat_write_id_argsTupleScheme(); } } - private static class finalize_write_id_argsTupleScheme extends TupleScheme { + private static class heartbeat_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -177353,11 +178211,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = 
(TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new FinalizeWriteIdRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -177366,18 +178224,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_ar } - public static class finalize_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_result"); + public static class heartbeat_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new finalize_write_id_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new finalize_write_id_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new heartbeat_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_write_id_resultTupleSchemeFactory()); } - private FinalizeWriteIdResult success; // required + private HeartbeatWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -177442,16 +178300,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_result.class, metaDataMap); } - public finalize_write_id_result() { + public heartbeat_write_id_result() { } - public finalize_write_id_result( - FinalizeWriteIdResult success) + public heartbeat_write_id_result( + HeartbeatWriteIdResult success) { this(); this.success = success; @@ -177460,14 +178318,14 @@ public finalize_write_id_result( /** * Performs a deep copy on other. 
      */
-    public finalize_write_id_result(finalize_write_id_result other) {
+    public heartbeat_write_id_result(heartbeat_write_id_result other) {
       if (other.isSetSuccess()) {
-        this.success = new FinalizeWriteIdResult(other.success);
+        this.success = new HeartbeatWriteIdResult(other.success);
       }
     }

-    public finalize_write_id_result deepCopy() {
-      return new finalize_write_id_result(this);
+    public heartbeat_write_id_result deepCopy() {
+      return new heartbeat_write_id_result(this);
     }

     @Override
@@ -177475,11 +178333,11 @@ public void clear() {
       this.success = null;
     }

-    public FinalizeWriteIdResult getSuccess() {
+    public HeartbeatWriteIdResult getSuccess() {
       return this.success;
     }

-    public void setSuccess(FinalizeWriteIdResult success) {
+    public void setSuccess(HeartbeatWriteIdResult success) {
       this.success = success;
     }

@@ -177504,7 +178362,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((FinalizeWriteIdResult)value);
+          setSuccess((HeartbeatWriteIdResult)value);
         }
         break;

@@ -177537,12 +178395,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof finalize_write_id_result)
-        return this.equals((finalize_write_id_result)that);
+      if (that instanceof heartbeat_write_id_result)
+        return this.equals((heartbeat_write_id_result)that);
       return false;
     }

-    public boolean equals(finalize_write_id_result that) {
+    public boolean equals(heartbeat_write_id_result that) {
       if (that == null)
         return false;

@@ -177571,7 +178429,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(finalize_write_id_result other) {
+    public int compareTo(heartbeat_write_id_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }

@@ -177605,7 +178463,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("finalize_write_id_result(");
+      StringBuilder sb = new StringBuilder("heartbeat_write_id_result(");
       boolean first = true;

       sb.append("success:");
@@ -177643,15 +178501,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class finalize_write_id_resultStandardSchemeFactory implements SchemeFactory {
-      public finalize_write_id_resultStandardScheme getScheme() {
-        return new finalize_write_id_resultStandardScheme();
+    private static class heartbeat_write_id_resultStandardSchemeFactory implements SchemeFactory {
+      public heartbeat_write_id_resultStandardScheme getScheme() {
+        return new heartbeat_write_id_resultStandardScheme();
       }
     }

-    private static class finalize_write_id_resultStandardScheme extends StandardScheme<finalize_write_id_result> {
+    private static class heartbeat_write_id_resultStandardScheme extends StandardScheme<heartbeat_write_id_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -177663,7 +178521,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_r
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new FinalizeWriteIdResult();
+                struct.success = new HeartbeatWriteIdResult();
                struct.success.read(iprot);
                struct.setSuccessIsSet(true);
              } else {
@@ -177679,7 +178537,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_r
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -177694,16 +178552,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_
      }

    }

-    private static class finalize_write_id_resultTupleSchemeFactory implements SchemeFactory {
-      public finalize_write_id_resultTupleScheme getScheme() {
-        return new finalize_write_id_resultTupleScheme();
+    private static class heartbeat_write_id_resultTupleSchemeFactory implements SchemeFactory {
+      public heartbeat_write_id_resultTupleScheme getScheme() {
+        return new heartbeat_write_id_resultTupleScheme();
      }
    }

-    private static class finalize_write_id_resultTupleScheme extends TupleScheme<finalize_write_id_result> {
+    private static class heartbeat_write_id_resultTupleScheme extends TupleScheme<heartbeat_write_id_result> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
@@ -177716,11 +178574,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_r
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.success = new FinalizeWriteIdResult();
+          struct.success = new HeartbeatWriteIdResult();
          struct.success.read(iprot);
          struct.setSuccessIsSet(true);
        }
@@ -177729,18 +178587,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_re
   }

-  public static class heartbeat_write_id_args implements org.apache.thrift.TBase<heartbeat_write_id_args, heartbeat_write_id_args._Fields>, java.io.Serializable, Cloneable, Comparable<heartbeat_write_id_args> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_args");
+  public static class get_valid_write_ids_args implements org.apache.thrift.TBase<get_valid_write_ids_args, get_valid_write_ids_args._Fields>, java.io.Serializable, Cloneable, Comparable<get_valid_write_ids_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_valid_write_ids_args");

     private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new heartbeat_write_id_argsStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new heartbeat_write_id_argsTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_valid_write_ids_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_valid_write_ids_argsTupleSchemeFactory());
     }

-    private HeartbeatWriteIdRequest req; // required
+    private GetValidWriteIdsRequest req; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -177805,16 +178663,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdRequest.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetValidWriteIdsRequest.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_args.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_valid_write_ids_args.class, metaDataMap);
     }

-    public heartbeat_write_id_args() {
+    public get_valid_write_ids_args() {
     }

-    public heartbeat_write_id_args(
-      HeartbeatWriteIdRequest req)
+    public get_valid_write_ids_args(
+      GetValidWriteIdsRequest req)
     {
       this();
       this.req = req;
@@ -177823,14 +178681,14 @@ public heartbeat_write_id_args(
     /**
      * Performs a deep copy on other.
      */
-    public heartbeat_write_id_args(heartbeat_write_id_args other) {
+    public get_valid_write_ids_args(get_valid_write_ids_args other) {
       if (other.isSetReq()) {
-        this.req = new HeartbeatWriteIdRequest(other.req);
+        this.req = new GetValidWriteIdsRequest(other.req);
       }
     }

-    public heartbeat_write_id_args deepCopy() {
-      return new heartbeat_write_id_args(this);
+    public get_valid_write_ids_args deepCopy() {
+      return new get_valid_write_ids_args(this);
     }

     @Override
@@ -177838,11 +178696,11 @@ public void clear() {
       this.req = null;
     }

-    public HeartbeatWriteIdRequest getReq() {
+    public GetValidWriteIdsRequest getReq() {
       return this.req;
     }

-    public void setReq(HeartbeatWriteIdRequest req) {
+    public void setReq(GetValidWriteIdsRequest req) {
       this.req = req;
     }

@@ -177867,7 +178725,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetReq();
         } else {
-          setReq((HeartbeatWriteIdRequest)value);
+          setReq((GetValidWriteIdsRequest)value);
         }
         break;

@@ -177900,12 +178758,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof heartbeat_write_id_args)
-        return this.equals((heartbeat_write_id_args)that);
+      if (that instanceof get_valid_write_ids_args)
+        return this.equals((get_valid_write_ids_args)that);
       return false;
     }

-    public boolean equals(heartbeat_write_id_args that) {
+    public boolean equals(get_valid_write_ids_args that) {
       if (that == null)
         return false;

@@ -177934,7 +178792,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(heartbeat_write_id_args other) {
+    public int compareTo(get_valid_write_ids_args other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }

@@ -177968,7 +178826,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("heartbeat_write_id_args(");
+      StringBuilder sb = new StringBuilder("get_valid_write_ids_args(");
       boolean first = true;

       sb.append("req:");
@@ -178006,15 +178864,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class heartbeat_write_id_argsStandardSchemeFactory implements SchemeFactory {
-      public heartbeat_write_id_argsStandardScheme getScheme() {
-        return new heartbeat_write_id_argsStandardScheme();
+    private static class get_valid_write_ids_argsStandardSchemeFactory implements SchemeFactory {
+      public get_valid_write_ids_argsStandardScheme getScheme() {
+        return new get_valid_write_ids_argsStandardScheme();
       }
     }

-    private static class heartbeat_write_id_argsStandardScheme extends StandardScheme<heartbeat_write_id_args> {
+    private static class get_valid_write_ids_argsStandardScheme extends StandardScheme<get_valid_write_ids_args> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_valid_write_ids_args struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -178026,7 +178884,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_
          switch (schemeField.id) {
            case 1: // REQ
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.req = new HeartbeatWriteIdRequest();
+                struct.req = new GetValidWriteIdsRequest();
                struct.req.read(iprot);
                struct.setReqIsSet(true);
              } else {
@@ -178042,7 +178900,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_valid_write_ids_args struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -178057,16 +178915,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id
      }

    }

-    private static class heartbeat_write_id_argsTupleSchemeFactory implements SchemeFactory {
-      public heartbeat_write_id_argsTupleScheme getScheme() {
-        return new heartbeat_write_id_argsTupleScheme();
+    private static class get_valid_write_ids_argsTupleSchemeFactory implements SchemeFactory {
+      public get_valid_write_ids_argsTupleScheme getScheme() {
+        return new get_valid_write_ids_argsTupleScheme();
      }
    }

-    private static class heartbeat_write_id_argsTupleScheme extends TupleScheme<heartbeat_write_id_args> {
+    private static class get_valid_write_ids_argsTupleScheme extends TupleScheme<get_valid_write_ids_args> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_args struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetReq()) {
@@ -178079,11 +178937,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_args struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.req = new HeartbeatWriteIdRequest();
+          struct.req = new GetValidWriteIdsRequest();
          struct.req.read(iprot);
          struct.setReqIsSet(true);
        }
@@ -178092,18 +178950,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_a
   }

-  public static class heartbeat_write_id_result implements org.apache.thrift.TBase<heartbeat_write_id_result, heartbeat_write_id_result._Fields>, java.io.Serializable, Cloneable, Comparable<heartbeat_write_id_result> {
-    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_result");
+  public static class get_valid_write_ids_result implements org.apache.thrift.TBase<get_valid_write_ids_result, get_valid_write_ids_result._Fields>, java.io.Serializable, Cloneable, Comparable<get_valid_write_ids_result> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_valid_write_ids_result");

     private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);

     private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
     static {
-      schemes.put(StandardScheme.class, new heartbeat_write_id_resultStandardSchemeFactory());
-      schemes.put(TupleScheme.class, new heartbeat_write_id_resultTupleSchemeFactory());
+      schemes.put(StandardScheme.class, new get_valid_write_ids_resultStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new get_valid_write_ids_resultTupleSchemeFactory());
     }

-    private HeartbeatWriteIdResult success; // required
+    private GetValidWriteIdsResult success; // required

     /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
     public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -178168,16 +179026,16 @@ public String getFieldName() {
     static {
       Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
       tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
-          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdResult.class)));
+          new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetValidWriteIdsResult.class)));
       metaDataMap = Collections.unmodifiableMap(tmpMap);
-      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_result.class, metaDataMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_valid_write_ids_result.class, metaDataMap);
     }

-    public heartbeat_write_id_result() {
+    public get_valid_write_ids_result() {
     }

-    public heartbeat_write_id_result(
-      HeartbeatWriteIdResult success)
+    public get_valid_write_ids_result(
+      GetValidWriteIdsResult success)
     {
       this();
       this.success = success;
@@ -178186,14 +179044,14 @@ public heartbeat_write_id_result(
     /**
      * Performs a deep copy on other.
      */
-    public heartbeat_write_id_result(heartbeat_write_id_result other) {
+    public get_valid_write_ids_result(get_valid_write_ids_result other) {
       if (other.isSetSuccess()) {
-        this.success = new HeartbeatWriteIdResult(other.success);
+        this.success = new GetValidWriteIdsResult(other.success);
       }
     }

-    public heartbeat_write_id_result deepCopy() {
-      return new heartbeat_write_id_result(this);
+    public get_valid_write_ids_result deepCopy() {
+      return new get_valid_write_ids_result(this);
     }

     @Override
@@ -178201,11 +179059,11 @@ public void clear() {
       this.success = null;
     }

-    public HeartbeatWriteIdResult getSuccess() {
+    public GetValidWriteIdsResult getSuccess() {
       return this.success;
     }

-    public void setSuccess(HeartbeatWriteIdResult success) {
+    public void setSuccess(GetValidWriteIdsResult success) {
       this.success = success;
     }

@@ -178230,7 +179088,7 @@ public void setFieldValue(_Fields field, Object value) {
         if (value == null) {
           unsetSuccess();
         } else {
-          setSuccess((HeartbeatWriteIdResult)value);
+          setSuccess((GetValidWriteIdsResult)value);
         }
         break;

@@ -178263,12 +179121,12 @@ public boolean isSet(_Fields field) {
     public boolean equals(Object that) {
       if (that == null)
         return false;
-      if (that instanceof heartbeat_write_id_result)
-        return this.equals((heartbeat_write_id_result)that);
+      if (that instanceof get_valid_write_ids_result)
+        return this.equals((get_valid_write_ids_result)that);
       return false;
     }

-    public boolean equals(heartbeat_write_id_result that) {
+    public boolean equals(get_valid_write_ids_result that) {
       if (that == null)
         return false;

@@ -178297,7 +179155,7 @@ public int hashCode() {
     }

     @Override
-    public int compareTo(heartbeat_write_id_result other) {
+    public int compareTo(get_valid_write_ids_result other) {
       if (!getClass().equals(other.getClass())) {
         return getClass().getName().compareTo(other.getClass().getName());
       }

@@ -178331,7 +179189,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
     @Override
     public String toString() {
-      StringBuilder sb = new StringBuilder("heartbeat_write_id_result(");
+      StringBuilder sb = new StringBuilder("get_valid_write_ids_result(");
       boolean first = true;

       sb.append("success:");
@@ -178369,15 +179227,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
       }
     }

-    private static class heartbeat_write_id_resultStandardSchemeFactory implements SchemeFactory {
-      public heartbeat_write_id_resultStandardScheme getScheme() {
-        return new heartbeat_write_id_resultStandardScheme();
+    private static class get_valid_write_ids_resultStandardSchemeFactory implements SchemeFactory {
+      public get_valid_write_ids_resultStandardScheme getScheme() {
+        return new get_valid_write_ids_resultStandardScheme();
       }
     }

-    private static class heartbeat_write_id_resultStandardScheme extends StandardScheme<heartbeat_write_id_result> {
+    private static class get_valid_write_ids_resultStandardScheme extends StandardScheme<get_valid_write_ids_result> {

-      public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol iprot, get_valid_write_ids_result struct) throws org.apache.thrift.TException {
        org.apache.thrift.protocol.TField schemeField;
        iprot.readStructBegin();
        while (true)
@@ -178389,7 +179247,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_
          switch (schemeField.id) {
            case 0: // SUCCESS
              if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
-                struct.success = new HeartbeatWriteIdResult();
+                struct.success = new GetValidWriteIdsResult();
                struct.success.read(iprot);
                struct.setSuccessIsSet(true);
              } else {
@@ -178405,7 +179263,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_
        struct.validate();
      }

-      public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol oprot, get_valid_write_ids_result struct) throws org.apache.thrift.TException {
        struct.validate();

        oprot.writeStructBegin(STRUCT_DESC);
@@ -178420,16 +179278,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id
      }

    }

-    private static class heartbeat_write_id_resultTupleSchemeFactory implements SchemeFactory {
-      public heartbeat_write_id_resultTupleScheme getScheme() {
-        return new heartbeat_write_id_resultTupleScheme();
+    private static class get_valid_write_ids_resultTupleSchemeFactory implements SchemeFactory {
+      public get_valid_write_ids_resultTupleScheme getScheme() {
+        return new get_valid_write_ids_resultTupleScheme();
      }
    }

-    private static class heartbeat_write_id_resultTupleScheme extends TupleScheme<heartbeat_write_id_result> {
+    private static class get_valid_write_ids_resultTupleScheme extends TupleScheme<get_valid_write_ids_result> {

      @Override
-      public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
+      public void write(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_result struct) throws org.apache.thrift.TException {
        TTupleProtocol oprot = (TTupleProtocol) prot;
        BitSet optionals = new BitSet();
        if (struct.isSetSuccess()) {
@@ -178442,11 +179300,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_
      }

      @Override
-      public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException {
+      public void read(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_result struct) throws org.apache.thrift.TException {
        TTupleProtocol iprot = (TTupleProtocol) prot;
        BitSet incoming = iprot.readBitSet(1);
        if (incoming.get(0)) {
-          struct.success = new HeartbeatWriteIdResult();
+          struct.success = new GetValidWriteIdsResult();
          struct.success.read(iprot);
          struct.setSuccessIsSet(true);
        }
diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index d228a739c532..17a8fa3dd05f 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@ -1178,6 +1178,11 @@ public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req);
    * @return \metastore\HeartbeatWriteIdResult
    */
   public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req);
+  /**
+   * @param \metastore\GetValidWriteIdsRequest $req
+   * @return \metastore\GetValidWriteIdsResult
+   */
+  public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req);
 }

 class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
@@ -9809,6 +9814,57 @@ public function recv_heartbeat_write_id()
     throw new \Exception("heartbeat_write_id failed: unknown result");
   }

+  public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req)
+  {
+    $this->send_get_valid_write_ids($req);
+    return $this->recv_get_valid_write_ids();
+  }
+
+  public function send_get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req)
+  {
+    $args = new \metastore\ThriftHiveMetastore_get_valid_write_ids_args();
+    $args->req = $req;
+    $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+    if ($bin_accel)
+    {
+      thrift_protocol_write_binary($this->output_, 'get_valid_write_ids', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+    }
+    else
+    {
+      $this->output_->writeMessageBegin('get_valid_write_ids', TMessageType::CALL, $this->seqid_);
+      $args->write($this->output_);
+      $this->output_->writeMessageEnd();
+      $this->output_->getTransport()->flush();
+    }
+  }
+
+  public function recv_get_valid_write_ids()
+  {
+    $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+    if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_valid_write_ids_result', $this->input_->isStrictRead());
+    else
+    {
+      $rseqid = 0;
+      $fname = null;
+      $mtype = 0;
+
+      $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+      if ($mtype == TMessageType::EXCEPTION) {
+        $x = new TApplicationException();
+        $x->read($this->input_);
+        $this->input_->readMessageEnd();
+        throw $x;
+      }
+      $result = new \metastore\ThriftHiveMetastore_get_valid_write_ids_result();
+      $result->read($this->input_);
+      $this->input_->readMessageEnd();
+    }
+    if ($result->success !== null) {
+      return $result->success;
+    }
+    throw new \Exception("get_valid_write_ids failed: unknown result");
+  }
+
 }

 // HELPER FUNCTIONS AND STRUCTURES
@@ -10960,14 +11016,14 @@ public function read($input)
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size569 = 0;
-            $_etype572 = 0;
-            $xfer += $input->readListBegin($_etype572, $_size569);
-            for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
+            $_size576 = 0;
+            $_etype579 = 0;
+            $xfer += $input->readListBegin($_etype579, $_size576);
+            for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
             {
-              $elem574 = null;
-              $xfer += $input->readString($elem574);
-              $this->success []= $elem574;
+              $elem581 = null;
+              $xfer += $input->readString($elem581);
+              $this->success []= $elem581;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11003,9 +11059,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRING, count($this->success));
       {
-        foreach ($this->success as $iter575)
+        foreach ($this->success as $iter582)
         {
-          $xfer += $output->writeString($iter575);
+          $xfer += $output->writeString($iter582);
         }
       }
       $output->writeListEnd();
@@ -11136,14 +11192,14 @@ public function read($input)
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size576 = 0;
-            $_etype579 = 0;
-            $xfer += $input->readListBegin($_etype579, $_size576);
-            for ($_i580 = 0; $_i580 < $_size576; ++$_i580)
+            $_size583 = 0;
+            $_etype586 = 0;
+            $xfer += $input->readListBegin($_etype586, $_size583);
+            for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
             {
-              $elem581 = null;
-              $xfer += $input->readString($elem581);
-              $this->success []= $elem581;
+              $elem588 = null;
+              $xfer += $input->readString($elem588);
+              $this->success []= $elem588;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -11179,9 +11235,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRING, count($this->success));
       {
-        foreach ($this->success as $iter582)
+        foreach ($this->success as $iter589)
         {
-          $xfer += $output->writeString($iter582);
+          $xfer += $output->writeString($iter589);
        }
      }
      $output->writeListEnd();
@@ -12182,18 +12238,18 @@ public function read($input)
        case 0:
          if ($ftype == TType::MAP) {
            $this->success = array();
-            $_size583 = 0;
-            $_ktype584 = 0;
-            $_vtype585 = 0;
-            $xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583);
-            for ($_i587 = 0; $_i587 < $_size583; ++$_i587)
+            $_size590 = 0;
+            $_ktype591 = 0;
+            $_vtype592 = 0;
+            $xfer += $input->readMapBegin($_ktype591, $_vtype592, $_size590);
+            for ($_i594 = 0; $_i594 < $_size590; ++$_i594)
            {
-              $key588 = '';
-              $val589 = new \metastore\Type();
-              $xfer += $input->readString($key588);
-              $val589 = new \metastore\Type();
-              $xfer += $val589->read($input);
-              $this->success[$key588] = $val589;
+              $key595 = '';
+              $val596 = new \metastore\Type();
+              $xfer += $input->readString($key595);
+              $val596 = new \metastore\Type();
+              $xfer += $val596->read($input);
+              $this->success[$key595] = $val596;
            }
            $xfer += $input->readMapEnd();
          } else {
@@ -12229,10 +12285,10 @@ public function write($output)
     {
       $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success));
       {
-        foreach ($this->success as $kiter590 => $viter591)
+        foreach ($this->success as $kiter597 => $viter598)
         {
-          $xfer += $output->writeString($kiter590);
-          $xfer += $viter591->write($output);
+          $xfer += $output->writeString($kiter597);
+          $xfer += $viter598->write($output);
         }
       }
       $output->writeMapEnd();
@@ -12436,15 +12492,15 @@ public function read($input)
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size592 = 0;
-            $_etype595 = 0;
-            $xfer += $input->readListBegin($_etype595, $_size592);
-            for ($_i596 = 0; $_i596 < $_size592; ++$_i596)
+            $_size599 = 0;
+            $_etype602 = 0;
+            $xfer += $input->readListBegin($_etype602, $_size599);
+            for ($_i603 = 0; $_i603 < $_size599; ++$_i603)
            {
-              $elem597 = null;
-              $elem597 = new \metastore\FieldSchema();
-              $xfer += $elem597->read($input);
-              $this->success []= $elem597;
+              $elem604 = null;
+              $elem604 = new \metastore\FieldSchema();
+              $xfer += $elem604->read($input);
+              $this->success []= $elem604;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -12496,9 +12552,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->success));
       {
-        foreach ($this->success as $iter598)
+        foreach ($this->success as $iter605)
        {
-          $xfer += $iter598->write($output);
+          $xfer += $iter605->write($output);
        }
      }
      $output->writeListEnd();
@@ -12740,15 +12796,15 @@ public function read($input)
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size599 = 0;
-            $_etype602 = 0;
-            $xfer += $input->readListBegin($_etype602, $_size599);
-            for ($_i603 = 0; $_i603 < $_size599; ++$_i603)
+            $_size606 = 0;
+            $_etype609 = 0;
+            $xfer += $input->readListBegin($_etype609, $_size606);
+            for ($_i610 = 0; $_i610 < $_size606; ++$_i610)
            {
-              $elem604 = null;
-              $elem604 = new \metastore\FieldSchema();
-              $xfer += $elem604->read($input);
-              $this->success []= $elem604;
+              $elem611 = null;
+              $elem611 = new \metastore\FieldSchema();
+              $xfer += $elem611->read($input);
+              $this->success []= $elem611;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -12800,9 +12856,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->success));
       {
-        foreach ($this->success as $iter605)
+        foreach ($this->success as $iter612)
        {
-          $xfer += $iter605->write($output);
+          $xfer += $iter612->write($output);
        }
      }
      $output->writeListEnd();
@@ -13016,15 +13072,15 @@ public function read($input)
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size606 = 0;
-            $_etype609 = 0;
-            $xfer += $input->readListBegin($_etype609, $_size606);
-            for ($_i610 = 0; $_i610 < $_size606; ++$_i610)
+            $_size613 = 0;
+            $_etype616 = 0;
+            $xfer += $input->readListBegin($_etype616, $_size613);
+            for ($_i617 = 0; $_i617 < $_size613; ++$_i617)
            {
-              $elem611 = null;
-              $elem611 = new \metastore\FieldSchema();
-              $xfer += $elem611->read($input);
-              $this->success []= $elem611;
+              $elem618 = null;
+              $elem618 = new \metastore\FieldSchema();
+              $xfer += $elem618->read($input);
+              $this->success []= $elem618;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -13076,9 +13132,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->success));
       {
-        foreach ($this->success as $iter612)
+        foreach ($this->success as $iter619)
        {
-          $xfer += $iter612->write($output);
+          $xfer += $iter619->write($output);
        }
      }
      $output->writeListEnd();
@@ -13320,15 +13376,15 @@ public function read($input)
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size613 = 0;
-            $_etype616 = 0;
-            $xfer += $input->readListBegin($_etype616, $_size613);
-            for ($_i617 = 0; $_i617 < $_size613; ++$_i617)
+            $_size620 = 0;
+            $_etype623 = 0;
+            $xfer += $input->readListBegin($_etype623, $_size620);
+            for ($_i624 = 0; $_i624 < $_size620; ++$_i624)
            {
-              $elem618 = null;
-              $elem618 = new \metastore\FieldSchema();
-              $xfer += $elem618->read($input);
-              $this->success []= $elem618;
+              $elem625 = null;
+              $elem625 = new \metastore\FieldSchema();
+              $xfer += $elem625->read($input);
+              $this->success []= $elem625;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -13380,9 +13436,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->success));
       {
-        foreach ($this->success as $iter619)
+        foreach ($this->success as $iter626)
        {
-          $xfer += $iter619->write($output);
+          $xfer += $iter626->write($output);
        }
      }
      $output->writeListEnd();
@@ -13990,15 +14046,15 @@ public function read($input)
        case 2:
          if ($ftype == TType::LST) {
            $this->primaryKeys = array();
-            $_size620 = 0;
-            $_etype623 = 0;
-            $xfer += $input->readListBegin($_etype623, $_size620);
-            for ($_i624 = 0; $_i624 < $_size620; ++$_i624)
+            $_size627 = 0;
+            $_etype630 = 0;
+            $xfer += $input->readListBegin($_etype630, $_size627);
+            for ($_i631 = 0; $_i631 < $_size627; ++$_i631)
            {
-              $elem625 = null;
-              $elem625 = new \metastore\SQLPrimaryKey();
-              $xfer += $elem625->read($input);
-              $this->primaryKeys []= $elem625;
+              $elem632 = null;
+              $elem632 = new \metastore\SQLPrimaryKey();
+              $xfer += $elem632->read($input);
+              $this->primaryKeys []= $elem632;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -14008,15 +14064,15 @@ public function read($input)
        case 3:
          if ($ftype == TType::LST) {
            $this->foreignKeys = array();
-            $_size626 = 0;
-            $_etype629 = 0;
-            $xfer += $input->readListBegin($_etype629, $_size626);
-            for ($_i630 = 0; $_i630 < $_size626; ++$_i630)
+            $_size633 = 0;
+            $_etype636 = 0;
+            $xfer += $input->readListBegin($_etype636, $_size633);
+            for ($_i637 = 0; $_i637 < $_size633; ++$_i637)
            {
-              $elem631 = null;
-              $elem631 = new \metastore\SQLForeignKey();
-              $xfer += $elem631->read($input);
-              $this->foreignKeys []= $elem631;
+              $elem638 = null;
+              $elem638 = new \metastore\SQLForeignKey();
+              $xfer += $elem638->read($input);
+              $this->foreignKeys []= $elem638;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -14052,9 +14108,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->primaryKeys));
       {
-        foreach ($this->primaryKeys as $iter632)
+        foreach ($this->primaryKeys as $iter639)
         {
-          $xfer += $iter632->write($output);
+          $xfer += $iter639->write($output);
         }
       }
       $output->writeListEnd();
@@ -14069,9 +14125,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRUCT, count($this->foreignKeys));
       {
-        foreach ($this->foreignKeys as $iter633)
+        foreach ($this->foreignKeys as $iter640)
         {
-          $xfer += $iter633->write($output);
+          $xfer += $iter640->write($output);
         }
       }
       $output->writeListEnd();
@@ -15417,14 +15473,14 @@ public function read($input)
        case 0:
          if ($ftype == TType::LST) {
            $this->success = array();
-            $_size634 = 0;
-            $_etype637 = 0;
-            $xfer += $input->readListBegin($_etype637, $_size634);
-            for ($_i638 = 0; $_i638 < $_size634; ++$_i638)
+            $_size641 = 0;
+            $_etype644 = 0;
+            $xfer += $input->readListBegin($_etype644, $_size641);
+            for ($_i645 = 0; $_i645 < $_size641; ++$_i645)
            {
-              $elem639 = null;
-              $xfer += $input->readString($elem639);
-              $this->success []= $elem639;
+              $elem646 = null;
+              $xfer += $input->readString($elem646);
+              $this->success []= $elem646;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -15460,9 +15516,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRING, count($this->success));
       {
-        foreach ($this->success as $iter640)
+        foreach ($this->success as $iter647)
         {
-          $xfer += $output->writeString($iter640);
+          $xfer += $output->writeString($iter647);
         }
       }
       $output->writeListEnd();
@@ -15567,14 +15623,14 @@ public function read($input)
        case 3:
          if ($ftype == TType::LST) {
            $this->tbl_types = array();
-            $_size641 = 0;
-            $_etype644 = 0;
-            $xfer += $input->readListBegin($_etype644, $_size641);
-            for ($_i645 = 0; $_i645 < $_size641; ++$_i645)
+            $_size648 = 0;
+            $_etype651 = 0;
+            $xfer += $input->readListBegin($_etype651, $_size648);
+            for ($_i652 = 0; $_i652 < $_size648; ++$_i652)
            {
-              $elem646 = null;
-              $xfer += $input->readString($elem646);
-              $this->tbl_types []= $elem646;
+              $elem653 = null;
+              $xfer += $input->readString($elem653);
+              $this->tbl_types []= $elem653;
            }
            $xfer += $input->readListEnd();
          } else {
@@ -15612,9 +15668,9 @@ public function write($output)
     {
       $output->writeListBegin(TType::STRING, count($this->tbl_types));
       {
-        foreach ($this->tbl_types as $iter647)
+
foreach ($this->tbl_types as $iter654) { - $xfer += $output->writeString($iter647); + $xfer += $output->writeString($iter654); } } $output->writeListEnd(); @@ -15691,15 +15747,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size648 = 0; - $_etype651 = 0; - $xfer += $input->readListBegin($_etype651, $_size648); - for ($_i652 = 0; $_i652 < $_size648; ++$_i652) + $_size655 = 0; + $_etype658 = 0; + $xfer += $input->readListBegin($_etype658, $_size655); + for ($_i659 = 0; $_i659 < $_size655; ++$_i659) { - $elem653 = null; - $elem653 = new \metastore\TableMeta(); - $xfer += $elem653->read($input); - $this->success []= $elem653; + $elem660 = null; + $elem660 = new \metastore\TableMeta(); + $xfer += $elem660->read($input); + $this->success []= $elem660; } $xfer += $input->readListEnd(); } else { @@ -15735,9 +15791,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter654) + foreach ($this->success as $iter661) { - $xfer += $iter654->write($output); + $xfer += $iter661->write($output); } } $output->writeListEnd(); @@ -15893,14 +15949,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size655 = 0; - $_etype658 = 0; - $xfer += $input->readListBegin($_etype658, $_size655); - for ($_i659 = 0; $_i659 < $_size655; ++$_i659) + $_size662 = 0; + $_etype665 = 0; + $xfer += $input->readListBegin($_etype665, $_size662); + for ($_i666 = 0; $_i666 < $_size662; ++$_i666) { - $elem660 = null; - $xfer += $input->readString($elem660); - $this->success []= $elem660; + $elem667 = null; + $xfer += $input->readString($elem667); + $this->success []= $elem667; } $xfer += $input->readListEnd(); } else { @@ -15936,9 +15992,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter661) + foreach ($this->success as $iter668) { - $xfer += $output->writeString($iter661); + $xfer += $output->writeString($iter668); } } $output->writeListEnd(); @@ -16253,14 +16309,14 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size662 = 0; - $_etype665 = 0; - $xfer += $input->readListBegin($_etype665, $_size662); - for ($_i666 = 0; $_i666 < $_size662; ++$_i666) + $_size669 = 0; + $_etype672 = 0; + $xfer += $input->readListBegin($_etype672, $_size669); + for ($_i673 = 0; $_i673 < $_size669; ++$_i673) { - $elem667 = null; - $xfer += $input->readString($elem667); - $this->tbl_names []= $elem667; + $elem674 = null; + $xfer += $input->readString($elem674); + $this->tbl_names []= $elem674; } $xfer += $input->readListEnd(); } else { @@ -16293,9 +16349,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter668) + foreach ($this->tbl_names as $iter675) { - $xfer += $output->writeString($iter668); + $xfer += $output->writeString($iter675); } } $output->writeListEnd(); @@ -16396,15 +16452,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size669 = 0; - $_etype672 = 0; - $xfer += $input->readListBegin($_etype672, $_size669); - for ($_i673 = 0; $_i673 < $_size669; ++$_i673) + $_size676 = 0; + $_etype679 = 0; + $xfer += $input->readListBegin($_etype679, $_size676); + for ($_i680 = 0; $_i680 < $_size676; ++$_i680) { - $elem674 = null; - $elem674 = new \metastore\Table(); - $xfer += $elem674->read($input); - 
$this->success []= $elem674; + $elem681 = null; + $elem681 = new \metastore\Table(); + $xfer += $elem681->read($input); + $this->success []= $elem681; } $xfer += $input->readListEnd(); } else { @@ -16456,9 +16512,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter675) + foreach ($this->success as $iter682) { - $xfer += $iter675->write($output); + $xfer += $iter682->write($output); } } $output->writeListEnd(); @@ -16694,14 +16750,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size676 = 0; - $_etype679 = 0; - $xfer += $input->readListBegin($_etype679, $_size676); - for ($_i680 = 0; $_i680 < $_size676; ++$_i680) + $_size683 = 0; + $_etype686 = 0; + $xfer += $input->readListBegin($_etype686, $_size683); + for ($_i687 = 0; $_i687 < $_size683; ++$_i687) { - $elem681 = null; - $xfer += $input->readString($elem681); - $this->success []= $elem681; + $elem688 = null; + $xfer += $input->readString($elem688); + $this->success []= $elem688; } $xfer += $input->readListEnd(); } else { @@ -16753,9 +16809,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter682) + foreach ($this->success as $iter689) { - $xfer += $output->writeString($iter682); + $xfer += $output->writeString($iter689); } } $output->writeListEnd(); @@ -18068,15 +18124,15 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size683 = 0; - $_etype686 = 0; - $xfer += $input->readListBegin($_etype686, $_size683); - for ($_i687 = 0; $_i687 < $_size683; ++$_i687) + $_size690 = 0; + $_etype693 = 0; + $xfer += $input->readListBegin($_etype693, $_size690); + for ($_i694 = 0; $_i694 < $_size690; ++$_i694) { - $elem688 = null; - $elem688 = new \metastore\Partition(); - $xfer += $elem688->read($input); - $this->new_parts []= $elem688; + $elem695 = null; + $elem695 = new \metastore\Partition(); + $xfer += $elem695->read($input); + $this->new_parts []= $elem695; } $xfer += $input->readListEnd(); } else { @@ -18104,9 +18160,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter689) + foreach ($this->new_parts as $iter696) { - $xfer += $iter689->write($output); + $xfer += $iter696->write($output); } } $output->writeListEnd(); @@ -18321,15 +18377,15 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size690 = 0; - $_etype693 = 0; - $xfer += $input->readListBegin($_etype693, $_size690); - for ($_i694 = 0; $_i694 < $_size690; ++$_i694) + $_size697 = 0; + $_etype700 = 0; + $xfer += $input->readListBegin($_etype700, $_size697); + for ($_i701 = 0; $_i701 < $_size697; ++$_i701) { - $elem695 = null; - $elem695 = new \metastore\PartitionSpec(); - $xfer += $elem695->read($input); - $this->new_parts []= $elem695; + $elem702 = null; + $elem702 = new \metastore\PartitionSpec(); + $xfer += $elem702->read($input); + $this->new_parts []= $elem702; } $xfer += $input->readListEnd(); } else { @@ -18357,9 +18413,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter696) + foreach ($this->new_parts as $iter703) { - $xfer += $iter696->write($output); + $xfer += $iter703->write($output); } } $output->writeListEnd(); @@ -18609,14 +18665,14 @@ public function read($input) case 3: if 
($ftype == TType::LST) { $this->part_vals = array(); - $_size697 = 0; - $_etype700 = 0; - $xfer += $input->readListBegin($_etype700, $_size697); - for ($_i701 = 0; $_i701 < $_size697; ++$_i701) + $_size704 = 0; + $_etype707 = 0; + $xfer += $input->readListBegin($_etype707, $_size704); + for ($_i708 = 0; $_i708 < $_size704; ++$_i708) { - $elem702 = null; - $xfer += $input->readString($elem702); - $this->part_vals []= $elem702; + $elem709 = null; + $xfer += $input->readString($elem709); + $this->part_vals []= $elem709; } $xfer += $input->readListEnd(); } else { @@ -18654,9 +18710,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter703) + foreach ($this->part_vals as $iter710) { - $xfer += $output->writeString($iter703); + $xfer += $output->writeString($iter710); } } $output->writeListEnd(); @@ -19158,14 +19214,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size704 = 0; - $_etype707 = 0; - $xfer += $input->readListBegin($_etype707, $_size704); - for ($_i708 = 0; $_i708 < $_size704; ++$_i708) + $_size711 = 0; + $_etype714 = 0; + $xfer += $input->readListBegin($_etype714, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $elem709 = null; - $xfer += $input->readString($elem709); - $this->part_vals []= $elem709; + $elem716 = null; + $xfer += $input->readString($elem716); + $this->part_vals []= $elem716; } $xfer += $input->readListEnd(); } else { @@ -19211,9 +19267,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter710) + foreach ($this->part_vals as $iter717) { - $xfer += $output->writeString($iter710); + $xfer += $output->writeString($iter717); } } $output->writeListEnd(); @@ -20067,14 +20123,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size711 = 0; - $_etype714 = 0; - $xfer += $input->readListBegin($_etype714, $_size711); - for ($_i715 = 0; $_i715 < $_size711; ++$_i715) + $_size718 = 0; + $_etype721 = 0; + $xfer += $input->readListBegin($_etype721, $_size718); + for ($_i722 = 0; $_i722 < $_size718; ++$_i722) { - $elem716 = null; - $xfer += $input->readString($elem716); - $this->part_vals []= $elem716; + $elem723 = null; + $xfer += $input->readString($elem723); + $this->part_vals []= $elem723; } $xfer += $input->readListEnd(); } else { @@ -20119,9 +20175,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter717) + foreach ($this->part_vals as $iter724) { - $xfer += $output->writeString($iter717); + $xfer += $output->writeString($iter724); } } $output->writeListEnd(); @@ -20374,14 +20430,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size718 = 0; - $_etype721 = 0; - $xfer += $input->readListBegin($_etype721, $_size718); - for ($_i722 = 0; $_i722 < $_size718; ++$_i722) + $_size725 = 0; + $_etype728 = 0; + $xfer += $input->readListBegin($_etype728, $_size725); + for ($_i729 = 0; $_i729 < $_size725; ++$_i729) { - $elem723 = null; - $xfer += $input->readString($elem723); - $this->part_vals []= $elem723; + $elem730 = null; + $xfer += $input->readString($elem730); + $this->part_vals []= $elem730; } $xfer += $input->readListEnd(); } else { @@ -20434,9 +20490,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, 
count($this->part_vals)); { - foreach ($this->part_vals as $iter724) + foreach ($this->part_vals as $iter731) { - $xfer += $output->writeString($iter724); + $xfer += $output->writeString($iter731); } } $output->writeListEnd(); @@ -21450,14 +21506,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size725 = 0; - $_etype728 = 0; - $xfer += $input->readListBegin($_etype728, $_size725); - for ($_i729 = 0; $_i729 < $_size725; ++$_i729) + $_size732 = 0; + $_etype735 = 0; + $xfer += $input->readListBegin($_etype735, $_size732); + for ($_i736 = 0; $_i736 < $_size732; ++$_i736) { - $elem730 = null; - $xfer += $input->readString($elem730); - $this->part_vals []= $elem730; + $elem737 = null; + $xfer += $input->readString($elem737); + $this->part_vals []= $elem737; } $xfer += $input->readListEnd(); } else { @@ -21495,9 +21551,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter731) + foreach ($this->part_vals as $iter738) { - $xfer += $output->writeString($iter731); + $xfer += $output->writeString($iter738); } } $output->writeListEnd(); @@ -21739,17 +21795,17 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size732 = 0; - $_ktype733 = 0; - $_vtype734 = 0; - $xfer += $input->readMapBegin($_ktype733, $_vtype734, $_size732); - for ($_i736 = 0; $_i736 < $_size732; ++$_i736) + $_size739 = 0; + $_ktype740 = 0; + $_vtype741 = 0; + $xfer += $input->readMapBegin($_ktype740, $_vtype741, $_size739); + for ($_i743 = 0; $_i743 < $_size739; ++$_i743) { - $key737 = ''; - $val738 = ''; - $xfer += $input->readString($key737); - $xfer += $input->readString($val738); - $this->partitionSpecs[$key737] = $val738; + $key744 = ''; + $val745 = ''; + $xfer += $input->readString($key744); + $xfer += $input->readString($val745); + $this->partitionSpecs[$key744] = $val745; } $xfer += $input->readMapEnd(); } else { @@ -21805,10 +21861,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter739 => $viter740) + foreach ($this->partitionSpecs as $kiter746 => $viter747) { - $xfer += $output->writeString($kiter739); - $xfer += $output->writeString($viter740); + $xfer += $output->writeString($kiter746); + $xfer += $output->writeString($viter747); } } $output->writeMapEnd(); @@ -22120,17 +22176,17 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size741 = 0; - $_ktype742 = 0; - $_vtype743 = 0; - $xfer += $input->readMapBegin($_ktype742, $_vtype743, $_size741); - for ($_i745 = 0; $_i745 < $_size741; ++$_i745) + $_size748 = 0; + $_ktype749 = 0; + $_vtype750 = 0; + $xfer += $input->readMapBegin($_ktype749, $_vtype750, $_size748); + for ($_i752 = 0; $_i752 < $_size748; ++$_i752) { - $key746 = ''; - $val747 = ''; - $xfer += $input->readString($key746); - $xfer += $input->readString($val747); - $this->partitionSpecs[$key746] = $val747; + $key753 = ''; + $val754 = ''; + $xfer += $input->readString($key753); + $xfer += $input->readString($val754); + $this->partitionSpecs[$key753] = $val754; } $xfer += $input->readMapEnd(); } else { @@ -22186,10 +22242,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter748 => $viter749) + foreach ($this->partitionSpecs as $kiter755 => 
$viter756) { - $xfer += $output->writeString($kiter748); - $xfer += $output->writeString($viter749); + $xfer += $output->writeString($kiter755); + $xfer += $output->writeString($viter756); } } $output->writeMapEnd(); @@ -22322,15 +22378,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size750 = 0; - $_etype753 = 0; - $xfer += $input->readListBegin($_etype753, $_size750); - for ($_i754 = 0; $_i754 < $_size750; ++$_i754) + $_size757 = 0; + $_etype760 = 0; + $xfer += $input->readListBegin($_etype760, $_size757); + for ($_i761 = 0; $_i761 < $_size757; ++$_i761) { - $elem755 = null; - $elem755 = new \metastore\Partition(); - $xfer += $elem755->read($input); - $this->success []= $elem755; + $elem762 = null; + $elem762 = new \metastore\Partition(); + $xfer += $elem762->read($input); + $this->success []= $elem762; } $xfer += $input->readListEnd(); } else { @@ -22390,9 +22446,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter756) + foreach ($this->success as $iter763) { - $xfer += $iter756->write($output); + $xfer += $iter763->write($output); } } $output->writeListEnd(); @@ -22538,14 +22594,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size757 = 0; - $_etype760 = 0; - $xfer += $input->readListBegin($_etype760, $_size757); - for ($_i761 = 0; $_i761 < $_size757; ++$_i761) + $_size764 = 0; + $_etype767 = 0; + $xfer += $input->readListBegin($_etype767, $_size764); + for ($_i768 = 0; $_i768 < $_size764; ++$_i768) { - $elem762 = null; - $xfer += $input->readString($elem762); - $this->part_vals []= $elem762; + $elem769 = null; + $xfer += $input->readString($elem769); + $this->part_vals []= $elem769; } $xfer += $input->readListEnd(); } else { @@ -22562,14 +22618,14 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size763 = 0; - $_etype766 = 0; - $xfer += $input->readListBegin($_etype766, $_size763); - for ($_i767 = 0; $_i767 < $_size763; ++$_i767) + $_size770 = 0; + $_etype773 = 0; + $xfer += $input->readListBegin($_etype773, $_size770); + for ($_i774 = 0; $_i774 < $_size770; ++$_i774) { - $elem768 = null; - $xfer += $input->readString($elem768); - $this->group_names []= $elem768; + $elem775 = null; + $xfer += $input->readString($elem775); + $this->group_names []= $elem775; } $xfer += $input->readListEnd(); } else { @@ -22607,9 +22663,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter769) + foreach ($this->part_vals as $iter776) { - $xfer += $output->writeString($iter769); + $xfer += $output->writeString($iter776); } } $output->writeListEnd(); @@ -22629,9 +22685,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter770) + foreach ($this->group_names as $iter777) { - $xfer += $output->writeString($iter770); + $xfer += $output->writeString($iter777); } } $output->writeListEnd(); @@ -23222,15 +23278,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size771 = 0; - $_etype774 = 0; - $xfer += $input->readListBegin($_etype774, $_size771); - for ($_i775 = 0; $_i775 < $_size771; ++$_i775) + $_size778 = 0; + $_etype781 = 0; + $xfer += $input->readListBegin($_etype781, $_size778); + for ($_i782 = 0; $_i782 < $_size778; ++$_i782) { - 
$elem776 = null; - $elem776 = new \metastore\Partition(); - $xfer += $elem776->read($input); - $this->success []= $elem776; + $elem783 = null; + $elem783 = new \metastore\Partition(); + $xfer += $elem783->read($input); + $this->success []= $elem783; } $xfer += $input->readListEnd(); } else { @@ -23274,9 +23330,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter777) + foreach ($this->success as $iter784) { - $xfer += $iter777->write($output); + $xfer += $iter784->write($output); } } $output->writeListEnd(); @@ -23422,14 +23478,14 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size778 = 0; - $_etype781 = 0; - $xfer += $input->readListBegin($_etype781, $_size778); - for ($_i782 = 0; $_i782 < $_size778; ++$_i782) + $_size785 = 0; + $_etype788 = 0; + $xfer += $input->readListBegin($_etype788, $_size785); + for ($_i789 = 0; $_i789 < $_size785; ++$_i789) { - $elem783 = null; - $xfer += $input->readString($elem783); - $this->group_names []= $elem783; + $elem790 = null; + $xfer += $input->readString($elem790); + $this->group_names []= $elem790; } $xfer += $input->readListEnd(); } else { @@ -23477,9 +23533,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter784) + foreach ($this->group_names as $iter791) { - $xfer += $output->writeString($iter784); + $xfer += $output->writeString($iter791); } } $output->writeListEnd(); @@ -23568,15 +23624,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size785 = 0; - $_etype788 = 0; - $xfer += $input->readListBegin($_etype788, $_size785); - for ($_i789 = 0; $_i789 < $_size785; ++$_i789) + $_size792 = 0; + $_etype795 = 0; + $xfer += $input->readListBegin($_etype795, $_size792); + for ($_i796 = 0; $_i796 < $_size792; ++$_i796) { - $elem790 = null; - $elem790 = new \metastore\Partition(); - $xfer += $elem790->read($input); - $this->success []= $elem790; + $elem797 = null; + $elem797 = new \metastore\Partition(); + $xfer += $elem797->read($input); + $this->success []= $elem797; } $xfer += $input->readListEnd(); } else { @@ -23620,9 +23676,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter791) + foreach ($this->success as $iter798) { - $xfer += $iter791->write($output); + $xfer += $iter798->write($output); } } $output->writeListEnd(); @@ -23842,15 +23898,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size792 = 0; - $_etype795 = 0; - $xfer += $input->readListBegin($_etype795, $_size792); - for ($_i796 = 0; $_i796 < $_size792; ++$_i796) + $_size799 = 0; + $_etype802 = 0; + $xfer += $input->readListBegin($_etype802, $_size799); + for ($_i803 = 0; $_i803 < $_size799; ++$_i803) { - $elem797 = null; - $elem797 = new \metastore\PartitionSpec(); - $xfer += $elem797->read($input); - $this->success []= $elem797; + $elem804 = null; + $elem804 = new \metastore\PartitionSpec(); + $xfer += $elem804->read($input); + $this->success []= $elem804; } $xfer += $input->readListEnd(); } else { @@ -23894,9 +23950,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter798) + foreach ($this->success as $iter805) { - $xfer += $iter798->write($output); + $xfer += $iter805->write($output); 
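// --- Illustrative aside (editor's note; not part of the generated diff) -----
// The hunks above and below are mechanical renumbering of generator-assigned
// temporaries; the substantive PHP-side change in this patch is the new
// get_valid_write_ids() client call and the GetValidWriteIdsRequest/Result
// types. A minimal usage sketch, assuming $client is an already-connected
// \metastore\ThriftHiveMetastoreClient (transport/protocol setup omitted) and
// that the database/table names below are hypothetical:
$req = new \metastore\GetValidWriteIdsRequest(array(
  'dbName'  => 'default',
  'tblName' => 'mm_table',
));
// Returns a \metastore\GetValidWriteIdsResult.
$res = $client->get_valid_write_ids($req);
// The result carries low/high watermark write ids; when areIdsValid is true,
// $res->ids enumerates the valid write ids between the watermarks.
printf("write id range: [%d, %d]\n", $res->lowWatermarkId, $res->highWatermarkId);
// -----------------------------------------------------------------------------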
} } $output->writeListEnd(); @@ -24103,14 +24159,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size799 = 0; - $_etype802 = 0; - $xfer += $input->readListBegin($_etype802, $_size799); - for ($_i803 = 0; $_i803 < $_size799; ++$_i803) + $_size806 = 0; + $_etype809 = 0; + $xfer += $input->readListBegin($_etype809, $_size806); + for ($_i810 = 0; $_i810 < $_size806; ++$_i810) { - $elem804 = null; - $xfer += $input->readString($elem804); - $this->success []= $elem804; + $elem811 = null; + $xfer += $input->readString($elem811); + $this->success []= $elem811; } $xfer += $input->readListEnd(); } else { @@ -24146,9 +24202,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter805) + foreach ($this->success as $iter812) { - $xfer += $output->writeString($iter805); + $xfer += $output->writeString($iter812); } } $output->writeListEnd(); @@ -24264,14 +24320,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size806 = 0; - $_etype809 = 0; - $xfer += $input->readListBegin($_etype809, $_size806); - for ($_i810 = 0; $_i810 < $_size806; ++$_i810) + $_size813 = 0; + $_etype816 = 0; + $xfer += $input->readListBegin($_etype816, $_size813); + for ($_i817 = 0; $_i817 < $_size813; ++$_i817) { - $elem811 = null; - $xfer += $input->readString($elem811); - $this->part_vals []= $elem811; + $elem818 = null; + $xfer += $input->readString($elem818); + $this->part_vals []= $elem818; } $xfer += $input->readListEnd(); } else { @@ -24316,9 +24372,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter812) + foreach ($this->part_vals as $iter819) { - $xfer += $output->writeString($iter812); + $xfer += $output->writeString($iter819); } } $output->writeListEnd(); @@ -24412,15 +24468,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size813 = 0; - $_etype816 = 0; - $xfer += $input->readListBegin($_etype816, $_size813); - for ($_i817 = 0; $_i817 < $_size813; ++$_i817) + $_size820 = 0; + $_etype823 = 0; + $xfer += $input->readListBegin($_etype823, $_size820); + for ($_i824 = 0; $_i824 < $_size820; ++$_i824) { - $elem818 = null; - $elem818 = new \metastore\Partition(); - $xfer += $elem818->read($input); - $this->success []= $elem818; + $elem825 = null; + $elem825 = new \metastore\Partition(); + $xfer += $elem825->read($input); + $this->success []= $elem825; } $xfer += $input->readListEnd(); } else { @@ -24464,9 +24520,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter819) + foreach ($this->success as $iter826) { - $xfer += $iter819->write($output); + $xfer += $iter826->write($output); } } $output->writeListEnd(); @@ -24613,14 +24669,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size820 = 0; - $_etype823 = 0; - $xfer += $input->readListBegin($_etype823, $_size820); - for ($_i824 = 0; $_i824 < $_size820; ++$_i824) + $_size827 = 0; + $_etype830 = 0; + $xfer += $input->readListBegin($_etype830, $_size827); + for ($_i831 = 0; $_i831 < $_size827; ++$_i831) { - $elem825 = null; - $xfer += $input->readString($elem825); - $this->part_vals []= $elem825; + $elem832 = null; + $xfer += $input->readString($elem832); + $this->part_vals []= $elem832; } $xfer += 
$input->readListEnd(); } else { @@ -24644,14 +24700,14 @@ public function read($input) case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size826 = 0; - $_etype829 = 0; - $xfer += $input->readListBegin($_etype829, $_size826); - for ($_i830 = 0; $_i830 < $_size826; ++$_i830) + $_size833 = 0; + $_etype836 = 0; + $xfer += $input->readListBegin($_etype836, $_size833); + for ($_i837 = 0; $_i837 < $_size833; ++$_i837) { - $elem831 = null; - $xfer += $input->readString($elem831); - $this->group_names []= $elem831; + $elem838 = null; + $xfer += $input->readString($elem838); + $this->group_names []= $elem838; } $xfer += $input->readListEnd(); } else { @@ -24689,9 +24745,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter832) + foreach ($this->part_vals as $iter839) { - $xfer += $output->writeString($iter832); + $xfer += $output->writeString($iter839); } } $output->writeListEnd(); @@ -24716,9 +24772,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter833) + foreach ($this->group_names as $iter840) { - $xfer += $output->writeString($iter833); + $xfer += $output->writeString($iter840); } } $output->writeListEnd(); @@ -24807,15 +24863,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size834 = 0; - $_etype837 = 0; - $xfer += $input->readListBegin($_etype837, $_size834); - for ($_i838 = 0; $_i838 < $_size834; ++$_i838) + $_size841 = 0; + $_etype844 = 0; + $xfer += $input->readListBegin($_etype844, $_size841); + for ($_i845 = 0; $_i845 < $_size841; ++$_i845) { - $elem839 = null; - $elem839 = new \metastore\Partition(); - $xfer += $elem839->read($input); - $this->success []= $elem839; + $elem846 = null; + $elem846 = new \metastore\Partition(); + $xfer += $elem846->read($input); + $this->success []= $elem846; } $xfer += $input->readListEnd(); } else { @@ -24859,9 +24915,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter840) + foreach ($this->success as $iter847) { - $xfer += $iter840->write($output); + $xfer += $iter847->write($output); } } $output->writeListEnd(); @@ -24982,14 +25038,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size841 = 0; - $_etype844 = 0; - $xfer += $input->readListBegin($_etype844, $_size841); - for ($_i845 = 0; $_i845 < $_size841; ++$_i845) + $_size848 = 0; + $_etype851 = 0; + $xfer += $input->readListBegin($_etype851, $_size848); + for ($_i852 = 0; $_i852 < $_size848; ++$_i852) { - $elem846 = null; - $xfer += $input->readString($elem846); - $this->part_vals []= $elem846; + $elem853 = null; + $xfer += $input->readString($elem853); + $this->part_vals []= $elem853; } $xfer += $input->readListEnd(); } else { @@ -25034,9 +25090,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter847) + foreach ($this->part_vals as $iter854) { - $xfer += $output->writeString($iter847); + $xfer += $output->writeString($iter854); } } $output->writeListEnd(); @@ -25129,14 +25185,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size848 = 0; - $_etype851 = 0; - $xfer += $input->readListBegin($_etype851, $_size848); - for ($_i852 = 0; $_i852 < $_size848; ++$_i852) + 
$_size855 = 0; + $_etype858 = 0; + $xfer += $input->readListBegin($_etype858, $_size855); + for ($_i859 = 0; $_i859 < $_size855; ++$_i859) { - $elem853 = null; - $xfer += $input->readString($elem853); - $this->success []= $elem853; + $elem860 = null; + $xfer += $input->readString($elem860); + $this->success []= $elem860; } $xfer += $input->readListEnd(); } else { @@ -25180,9 +25236,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter854) + foreach ($this->success as $iter861) { - $xfer += $output->writeString($iter854); + $xfer += $output->writeString($iter861); } } $output->writeListEnd(); @@ -25425,15 +25481,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size855 = 0; - $_etype858 = 0; - $xfer += $input->readListBegin($_etype858, $_size855); - for ($_i859 = 0; $_i859 < $_size855; ++$_i859) + $_size862 = 0; + $_etype865 = 0; + $xfer += $input->readListBegin($_etype865, $_size862); + for ($_i866 = 0; $_i866 < $_size862; ++$_i866) { - $elem860 = null; - $elem860 = new \metastore\Partition(); - $xfer += $elem860->read($input); - $this->success []= $elem860; + $elem867 = null; + $elem867 = new \metastore\Partition(); + $xfer += $elem867->read($input); + $this->success []= $elem867; } $xfer += $input->readListEnd(); } else { @@ -25477,9 +25533,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter861) + foreach ($this->success as $iter868) { - $xfer += $iter861->write($output); + $xfer += $iter868->write($output); } } $output->writeListEnd(); @@ -25722,15 +25778,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size862 = 0; - $_etype865 = 0; - $xfer += $input->readListBegin($_etype865, $_size862); - for ($_i866 = 0; $_i866 < $_size862; ++$_i866) + $_size869 = 0; + $_etype872 = 0; + $xfer += $input->readListBegin($_etype872, $_size869); + for ($_i873 = 0; $_i873 < $_size869; ++$_i873) { - $elem867 = null; - $elem867 = new \metastore\PartitionSpec(); - $xfer += $elem867->read($input); - $this->success []= $elem867; + $elem874 = null; + $elem874 = new \metastore\PartitionSpec(); + $xfer += $elem874->read($input); + $this->success []= $elem874; } $xfer += $input->readListEnd(); } else { @@ -25774,9 +25830,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter868) + foreach ($this->success as $iter875) { - $xfer += $iter868->write($output); + $xfer += $iter875->write($output); } } $output->writeListEnd(); @@ -26342,14 +26398,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size869 = 0; - $_etype872 = 0; - $xfer += $input->readListBegin($_etype872, $_size869); - for ($_i873 = 0; $_i873 < $_size869; ++$_i873) + $_size876 = 0; + $_etype879 = 0; + $xfer += $input->readListBegin($_etype879, $_size876); + for ($_i880 = 0; $_i880 < $_size876; ++$_i880) { - $elem874 = null; - $xfer += $input->readString($elem874); - $this->names []= $elem874; + $elem881 = null; + $xfer += $input->readString($elem881); + $this->names []= $elem881; } $xfer += $input->readListEnd(); } else { @@ -26387,9 +26443,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter875) + foreach ($this->names as $iter882) { - $xfer += 
$output->writeString($iter875); + $xfer += $output->writeString($iter882); } } $output->writeListEnd(); @@ -26478,15 +26534,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size876 = 0; - $_etype879 = 0; - $xfer += $input->readListBegin($_etype879, $_size876); - for ($_i880 = 0; $_i880 < $_size876; ++$_i880) + $_size883 = 0; + $_etype886 = 0; + $xfer += $input->readListBegin($_etype886, $_size883); + for ($_i887 = 0; $_i887 < $_size883; ++$_i887) { - $elem881 = null; - $elem881 = new \metastore\Partition(); - $xfer += $elem881->read($input); - $this->success []= $elem881; + $elem888 = null; + $elem888 = new \metastore\Partition(); + $xfer += $elem888->read($input); + $this->success []= $elem888; } $xfer += $input->readListEnd(); } else { @@ -26530,9 +26586,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter882) + foreach ($this->success as $iter889) { - $xfer += $iter882->write($output); + $xfer += $iter889->write($output); } } $output->writeListEnd(); @@ -26871,15 +26927,15 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size883 = 0; - $_etype886 = 0; - $xfer += $input->readListBegin($_etype886, $_size883); - for ($_i887 = 0; $_i887 < $_size883; ++$_i887) + $_size890 = 0; + $_etype893 = 0; + $xfer += $input->readListBegin($_etype893, $_size890); + for ($_i894 = 0; $_i894 < $_size890; ++$_i894) { - $elem888 = null; - $elem888 = new \metastore\Partition(); - $xfer += $elem888->read($input); - $this->new_parts []= $elem888; + $elem895 = null; + $elem895 = new \metastore\Partition(); + $xfer += $elem895->read($input); + $this->new_parts []= $elem895; } $xfer += $input->readListEnd(); } else { @@ -26917,9 +26973,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter889) + foreach ($this->new_parts as $iter896) { - $xfer += $iter889->write($output); + $xfer += $iter896->write($output); } } $output->writeListEnd(); @@ -27134,15 +27190,15 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size890 = 0; - $_etype893 = 0; - $xfer += $input->readListBegin($_etype893, $_size890); - for ($_i894 = 0; $_i894 < $_size890; ++$_i894) + $_size897 = 0; + $_etype900 = 0; + $xfer += $input->readListBegin($_etype900, $_size897); + for ($_i901 = 0; $_i901 < $_size897; ++$_i901) { - $elem895 = null; - $elem895 = new \metastore\Partition(); - $xfer += $elem895->read($input); - $this->new_parts []= $elem895; + $elem902 = null; + $elem902 = new \metastore\Partition(); + $xfer += $elem902->read($input); + $this->new_parts []= $elem902; } $xfer += $input->readListEnd(); } else { @@ -27188,9 +27244,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter896) + foreach ($this->new_parts as $iter903) { - $xfer += $iter896->write($output); + $xfer += $iter903->write($output); } } $output->writeListEnd(); @@ -27668,14 +27724,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size897 = 0; - $_etype900 = 0; - $xfer += $input->readListBegin($_etype900, $_size897); - for ($_i901 = 0; $_i901 < $_size897; ++$_i901) + $_size904 = 0; + $_etype907 = 0; + $xfer += $input->readListBegin($_etype907, $_size904); + for ($_i908 = 0; $_i908 < $_size904; ++$_i908) { - $elem902 = 
null; - $xfer += $input->readString($elem902); - $this->part_vals []= $elem902; + $elem909 = null; + $xfer += $input->readString($elem909); + $this->part_vals []= $elem909; } $xfer += $input->readListEnd(); } else { @@ -27721,9 +27777,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter903) + foreach ($this->part_vals as $iter910) { - $xfer += $output->writeString($iter903); + $xfer += $output->writeString($iter910); } } $output->writeListEnd(); @@ -27908,14 +27964,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size904 = 0; - $_etype907 = 0; - $xfer += $input->readListBegin($_etype907, $_size904); - for ($_i908 = 0; $_i908 < $_size904; ++$_i908) + $_size911 = 0; + $_etype914 = 0; + $xfer += $input->readListBegin($_etype914, $_size911); + for ($_i915 = 0; $_i915 < $_size911; ++$_i915) { - $elem909 = null; - $xfer += $input->readString($elem909); - $this->part_vals []= $elem909; + $elem916 = null; + $xfer += $input->readString($elem916); + $this->part_vals []= $elem916; } $xfer += $input->readListEnd(); } else { @@ -27950,9 +28006,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter910) + foreach ($this->part_vals as $iter917) { - $xfer += $output->writeString($iter910); + $xfer += $output->writeString($iter917); } } $output->writeListEnd(); @@ -28406,14 +28462,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size911 = 0; - $_etype914 = 0; - $xfer += $input->readListBegin($_etype914, $_size911); - for ($_i915 = 0; $_i915 < $_size911; ++$_i915) + $_size918 = 0; + $_etype921 = 0; + $xfer += $input->readListBegin($_etype921, $_size918); + for ($_i922 = 0; $_i922 < $_size918; ++$_i922) { - $elem916 = null; - $xfer += $input->readString($elem916); - $this->success []= $elem916; + $elem923 = null; + $xfer += $input->readString($elem923); + $this->success []= $elem923; } $xfer += $input->readListEnd(); } else { @@ -28449,9 +28505,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter917) + foreach ($this->success as $iter924) { - $xfer += $output->writeString($iter917); + $xfer += $output->writeString($iter924); } } $output->writeListEnd(); @@ -28611,17 +28667,17 @@ public function read($input) case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size918 = 0; - $_ktype919 = 0; - $_vtype920 = 0; - $xfer += $input->readMapBegin($_ktype919, $_vtype920, $_size918); - for ($_i922 = 0; $_i922 < $_size918; ++$_i922) + $_size925 = 0; + $_ktype926 = 0; + $_vtype927 = 0; + $xfer += $input->readMapBegin($_ktype926, $_vtype927, $_size925); + for ($_i929 = 0; $_i929 < $_size925; ++$_i929) { - $key923 = ''; - $val924 = ''; - $xfer += $input->readString($key923); - $xfer += $input->readString($val924); - $this->success[$key923] = $val924; + $key930 = ''; + $val931 = ''; + $xfer += $input->readString($key930); + $xfer += $input->readString($val931); + $this->success[$key930] = $val931; } $xfer += $input->readMapEnd(); } else { @@ -28657,10 +28713,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter925 => $viter926) + foreach ($this->success as $kiter932 => $viter933) { - $xfer += $output->writeString($kiter925); - $xfer += 
$output->writeString($viter926); + $xfer += $output->writeString($kiter932); + $xfer += $output->writeString($viter933); } } $output->writeMapEnd(); @@ -28780,17 +28836,17 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size927 = 0; - $_ktype928 = 0; - $_vtype929 = 0; - $xfer += $input->readMapBegin($_ktype928, $_vtype929, $_size927); - for ($_i931 = 0; $_i931 < $_size927; ++$_i931) + $_size934 = 0; + $_ktype935 = 0; + $_vtype936 = 0; + $xfer += $input->readMapBegin($_ktype935, $_vtype936, $_size934); + for ($_i938 = 0; $_i938 < $_size934; ++$_i938) { - $key932 = ''; - $val933 = ''; - $xfer += $input->readString($key932); - $xfer += $input->readString($val933); - $this->part_vals[$key932] = $val933; + $key939 = ''; + $val940 = ''; + $xfer += $input->readString($key939); + $xfer += $input->readString($val940); + $this->part_vals[$key939] = $val940; } $xfer += $input->readMapEnd(); } else { @@ -28835,10 +28891,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter934 => $viter935) + foreach ($this->part_vals as $kiter941 => $viter942) { - $xfer += $output->writeString($kiter934); - $xfer += $output->writeString($viter935); + $xfer += $output->writeString($kiter941); + $xfer += $output->writeString($viter942); } } $output->writeMapEnd(); @@ -29160,17 +29216,17 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size936 = 0; - $_ktype937 = 0; - $_vtype938 = 0; - $xfer += $input->readMapBegin($_ktype937, $_vtype938, $_size936); - for ($_i940 = 0; $_i940 < $_size936; ++$_i940) + $_size943 = 0; + $_ktype944 = 0; + $_vtype945 = 0; + $xfer += $input->readMapBegin($_ktype944, $_vtype945, $_size943); + for ($_i947 = 0; $_i947 < $_size943; ++$_i947) { - $key941 = ''; - $val942 = ''; - $xfer += $input->readString($key941); - $xfer += $input->readString($val942); - $this->part_vals[$key941] = $val942; + $key948 = ''; + $val949 = ''; + $xfer += $input->readString($key948); + $xfer += $input->readString($val949); + $this->part_vals[$key948] = $val949; } $xfer += $input->readMapEnd(); } else { @@ -29215,10 +29271,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter943 => $viter944) + foreach ($this->part_vals as $kiter950 => $viter951) { - $xfer += $output->writeString($kiter943); - $xfer += $output->writeString($viter944); + $xfer += $output->writeString($kiter950); + $xfer += $output->writeString($viter951); } } $output->writeMapEnd(); @@ -30692,15 +30748,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size945 = 0; - $_etype948 = 0; - $xfer += $input->readListBegin($_etype948, $_size945); - for ($_i949 = 0; $_i949 < $_size945; ++$_i949) + $_size952 = 0; + $_etype955 = 0; + $xfer += $input->readListBegin($_etype955, $_size952); + for ($_i956 = 0; $_i956 < $_size952; ++$_i956) { - $elem950 = null; - $elem950 = new \metastore\Index(); - $xfer += $elem950->read($input); - $this->success []= $elem950; + $elem957 = null; + $elem957 = new \metastore\Index(); + $xfer += $elem957->read($input); + $this->success []= $elem957; } $xfer += $input->readListEnd(); } else { @@ -30744,9 +30800,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter951) + foreach ($this->success as 
$iter958) { - $xfer += $iter951->write($output); + $xfer += $iter958->write($output); } } $output->writeListEnd(); @@ -30953,14 +31009,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size952 = 0; - $_etype955 = 0; - $xfer += $input->readListBegin($_etype955, $_size952); - for ($_i956 = 0; $_i956 < $_size952; ++$_i956) + $_size959 = 0; + $_etype962 = 0; + $xfer += $input->readListBegin($_etype962, $_size959); + for ($_i963 = 0; $_i963 < $_size959; ++$_i963) { - $elem957 = null; - $xfer += $input->readString($elem957); - $this->success []= $elem957; + $elem964 = null; + $xfer += $input->readString($elem964); + $this->success []= $elem964; } $xfer += $input->readListEnd(); } else { @@ -30996,9 +31052,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter958) + foreach ($this->success as $iter965) { - $xfer += $output->writeString($iter958); + $xfer += $output->writeString($iter965); } } $output->writeListEnd(); @@ -34892,14 +34948,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size959 = 0; - $_etype962 = 0; - $xfer += $input->readListBegin($_etype962, $_size959); - for ($_i963 = 0; $_i963 < $_size959; ++$_i963) + $_size966 = 0; + $_etype969 = 0; + $xfer += $input->readListBegin($_etype969, $_size966); + for ($_i970 = 0; $_i970 < $_size966; ++$_i970) { - $elem964 = null; - $xfer += $input->readString($elem964); - $this->success []= $elem964; + $elem971 = null; + $xfer += $input->readString($elem971); + $this->success []= $elem971; } $xfer += $input->readListEnd(); } else { @@ -34935,9 +34991,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter965) + foreach ($this->success as $iter972) { - $xfer += $output->writeString($iter965); + $xfer += $output->writeString($iter972); } } $output->writeListEnd(); @@ -35806,14 +35862,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size966 = 0; - $_etype969 = 0; - $xfer += $input->readListBegin($_etype969, $_size966); - for ($_i970 = 0; $_i970 < $_size966; ++$_i970) + $_size973 = 0; + $_etype976 = 0; + $xfer += $input->readListBegin($_etype976, $_size973); + for ($_i977 = 0; $_i977 < $_size973; ++$_i977) { - $elem971 = null; - $xfer += $input->readString($elem971); - $this->success []= $elem971; + $elem978 = null; + $xfer += $input->readString($elem978); + $this->success []= $elem978; } $xfer += $input->readListEnd(); } else { @@ -35849,9 +35905,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter972) + foreach ($this->success as $iter979) { - $xfer += $output->writeString($iter972); + $xfer += $output->writeString($iter979); } } $output->writeListEnd(); @@ -36542,15 +36598,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size973 = 0; - $_etype976 = 0; - $xfer += $input->readListBegin($_etype976, $_size973); - for ($_i977 = 0; $_i977 < $_size973; ++$_i977) + $_size980 = 0; + $_etype983 = 0; + $xfer += $input->readListBegin($_etype983, $_size980); + for ($_i984 = 0; $_i984 < $_size980; ++$_i984) { - $elem978 = null; - $elem978 = new \metastore\Role(); - $xfer += $elem978->read($input); - $this->success []= $elem978; + $elem985 = null; + $elem985 = new \metastore\Role(); + $xfer += 
$elem985->read($input); + $this->success []= $elem985; } $xfer += $input->readListEnd(); } else { @@ -36586,9 +36642,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter979) + foreach ($this->success as $iter986) { - $xfer += $iter979->write($output); + $xfer += $iter986->write($output); } } $output->writeListEnd(); @@ -37250,14 +37306,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size980 = 0; - $_etype983 = 0; - $xfer += $input->readListBegin($_etype983, $_size980); - for ($_i984 = 0; $_i984 < $_size980; ++$_i984) + $_size987 = 0; + $_etype990 = 0; + $xfer += $input->readListBegin($_etype990, $_size987); + for ($_i991 = 0; $_i991 < $_size987; ++$_i991) { - $elem985 = null; - $xfer += $input->readString($elem985); - $this->group_names []= $elem985; + $elem992 = null; + $xfer += $input->readString($elem992); + $this->group_names []= $elem992; } $xfer += $input->readListEnd(); } else { @@ -37298,9 +37354,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter986) + foreach ($this->group_names as $iter993) { - $xfer += $output->writeString($iter986); + $xfer += $output->writeString($iter993); } } $output->writeListEnd(); @@ -37608,15 +37664,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size987 = 0; - $_etype990 = 0; - $xfer += $input->readListBegin($_etype990, $_size987); - for ($_i991 = 0; $_i991 < $_size987; ++$_i991) + $_size994 = 0; + $_etype997 = 0; + $xfer += $input->readListBegin($_etype997, $_size994); + for ($_i998 = 0; $_i998 < $_size994; ++$_i998) { - $elem992 = null; - $elem992 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem992->read($input); - $this->success []= $elem992; + $elem999 = null; + $elem999 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem999->read($input); + $this->success []= $elem999; } $xfer += $input->readListEnd(); } else { @@ -37652,9 +37708,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter993) + foreach ($this->success as $iter1000) { - $xfer += $iter993->write($output); + $xfer += $iter1000->write($output); } } $output->writeListEnd(); @@ -38286,14 +38342,14 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size994 = 0; - $_etype997 = 0; - $xfer += $input->readListBegin($_etype997, $_size994); - for ($_i998 = 0; $_i998 < $_size994; ++$_i998) + $_size1001 = 0; + $_etype1004 = 0; + $xfer += $input->readListBegin($_etype1004, $_size1001); + for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005) { - $elem999 = null; - $xfer += $input->readString($elem999); - $this->group_names []= $elem999; + $elem1006 = null; + $xfer += $input->readString($elem1006); + $this->group_names []= $elem1006; } $xfer += $input->readListEnd(); } else { @@ -38326,9 +38382,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1000) + foreach ($this->group_names as $iter1007) { - $xfer += $output->writeString($iter1000); + $xfer += $output->writeString($iter1007); } } $output->writeListEnd(); @@ -38404,14 +38460,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1001 = 0; - $_etype1004 = 0; - $xfer += 
$input->readListBegin($_etype1004, $_size1001);
-          for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005)
+            $_size1008 = 0;
+            $_etype1011 = 0;
+            $xfer += $input->readListBegin($_etype1011, $_size1008);
+            for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012)
             {
-              $elem1006 = null;
-              $xfer += $input->readString($elem1006);
-              $this->success []= $elem1006;
+              $elem1013 = null;
+              $xfer += $input->readString($elem1013);
+              $this->success []= $elem1013;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -38447,9 +38503,9 @@ public function write($output) {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1007)
+          foreach ($this->success as $iter1014)
           {
-            $xfer += $output->writeString($iter1007);
+            $xfer += $output->writeString($iter1014);
           }
         }
         $output->writeListEnd();
@@ -39566,14 +39622,14 @@ public function read($input)
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1008 = 0;
-            $_etype1011 = 0;
-            $xfer += $input->readListBegin($_etype1011, $_size1008);
-            for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012)
+            $_size1015 = 0;
+            $_etype1018 = 0;
+            $xfer += $input->readListBegin($_etype1018, $_size1015);
+            for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019)
             {
-              $elem1013 = null;
-              $xfer += $input->readString($elem1013);
-              $this->success []= $elem1013;
+              $elem1020 = null;
+              $xfer += $input->readString($elem1020);
+              $this->success []= $elem1020;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -39601,9 +39657,9 @@ public function write($output) {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1014)
+          foreach ($this->success as $iter1021)
          {
-            $xfer += $output->writeString($iter1014);
+            $xfer += $output->writeString($iter1021);
          }
        }
        $output->writeListEnd();
@@ -40242,14 +40298,14 @@ public function read($input)
         case 0:
           if ($ftype == TType::LST) {
             $this->success = array();
-            $_size1015 = 0;
-            $_etype1018 = 0;
-            $xfer += $input->readListBegin($_etype1018, $_size1015);
-            for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019)
+            $_size1022 = 0;
+            $_etype1025 = 0;
+            $xfer += $input->readListBegin($_etype1025, $_size1022);
+            for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026)
             {
-              $elem1020 = null;
-              $xfer += $input->readString($elem1020);
-              $this->success []= $elem1020;
+              $elem1027 = null;
+              $xfer += $input->readString($elem1027);
+              $this->success []= $elem1027;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -40277,9 +40333,9 @@ public function write($output) {
       {
         $output->writeListBegin(TType::STRING, count($this->success));
         {
-          foreach ($this->success as $iter1021)
+          foreach ($this->success as $iter1028)
          {
-            $xfer += $output->writeString($iter1021);
+            $xfer += $output->writeString($iter1028);
          }
        }
        $output->writeListEnd();
@@ -44665,4 +44721,164 @@ public function write($output) {
 
 }
 
+class ThriftHiveMetastore_get_valid_write_ids_args {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\GetValidWriteIdsRequest
+   */
+  public $req = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'req',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\GetValidWriteIdsRequest',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['req'])) {
+        $this->req = $vals['req'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_valid_write_ids_args';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRUCT) {
+            $this->req = new \metastore\GetValidWriteIdsRequest();
+            $xfer += $this->req->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_valid_write_ids_args');
+    if ($this->req !== null) {
+      if (!is_object($this->req)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1);
+      $xfer += $this->req->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class ThriftHiveMetastore_get_valid_write_ids_result {
+  static $_TSPEC;
+
+  /**
+   * @var \metastore\GetValidWriteIdsResult
+   */
+  public $success = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        0 => array(
+          'var' => 'success',
+          'type' => TType::STRUCT,
+          'class' => '\metastore\GetValidWriteIdsResult',
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['success'])) {
+        $this->success = $vals['success'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'ThriftHiveMetastore_get_valid_write_ids_result';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 0:
+          if ($ftype == TType::STRUCT) {
+            $this->success = new \metastore\GetValidWriteIdsResult();
+            $xfer += $this->success->read($input);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_valid_write_ids_result');
+    if ($this->success !== null) {
+      if (!is_object($this->success)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0);
+      $xfer += $this->success->write($output);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php
index 78eb36529e0b..1418c2ec4a2f 100644
--- a/metastore/src/gen/thrift/gen-php/metastore/Types.php
+++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php
@@ -17978,6 +17978,274 @@ public function write($output) {
 
 }
 
+class GetValidWriteIdsRequest {
+  static $_TSPEC;
+
+  /**
+   * @var string
+   */
+  public $dbName = null;
+  /**
+   * @var string
+   */
+  public $tblName = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'dbName',
+          'type' => TType::STRING,
+          ),
+        2 => array(
+          'var' => 'tblName',
+          'type' => TType::STRING,
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['dbName'])) {
+        $this->dbName = $vals['dbName'];
+      }
+      if (isset($vals['tblName'])) {
+        $this->tblName = $vals['tblName'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'GetValidWriteIdsRequest';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->dbName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::STRING) {
+            $xfer += $input->readString($this->tblName);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('GetValidWriteIdsRequest');
+    if ($this->dbName !== null) {
+      $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1);
+      $xfer += $output->writeString($this->dbName);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->tblName !== null) {
+      $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2);
+      $xfer += $output->writeString($this->tblName);
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
+class GetValidWriteIdsResult {
+  static $_TSPEC;
+
+  /**
+   * @var int
+   */
+  public $lowWatermarkId = null;
+  /**
+   * @var int
+   */
+  public $highWatermarkId = null;
+  /**
+   * @var bool
+   */
+  public $areIdsValid = null;
+  /**
+   * @var int[]
+   */
+  public $ids = null;
+
+  public function __construct($vals=null) {
+    if (!isset(self::$_TSPEC)) {
+      self::$_TSPEC = array(
+        1 => array(
+          'var' => 'lowWatermarkId',
+          'type' => TType::I64,
+          ),
+        2 => array(
+          'var' => 'highWatermarkId',
+          'type' => TType::I64,
+          ),
+        3 => array(
+          'var' => 'areIdsValid',
+          'type' => TType::BOOL,
+          ),
+        4 => array(
+          'var' => 'ids',
+          'type' => TType::LST,
+          'etype' => TType::I64,
+          'elem' => array(
+            'type' => TType::I64,
+            ),
+          ),
+        );
+    }
+    if (is_array($vals)) {
+      if (isset($vals['lowWatermarkId'])) {
+        $this->lowWatermarkId = $vals['lowWatermarkId'];
+      }
+      if (isset($vals['highWatermarkId'])) {
+        $this->highWatermarkId = $vals['highWatermarkId'];
+      }
+      if (isset($vals['areIdsValid'])) {
+        $this->areIdsValid = $vals['areIdsValid'];
+      }
+      if (isset($vals['ids'])) {
+        $this->ids = $vals['ids'];
+      }
+    }
+  }
+
+  public function getName() {
+    return 'GetValidWriteIdsResult';
+  }
+
+  public function read($input)
+  {
+    $xfer = 0;
+    $fname = null;
+    $ftype = 0;
+    $fid = 0;
+    $xfer += $input->readStructBegin($fname);
+    while (true)
+    {
+      $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+      if ($ftype == TType::STOP) {
+        break;
+      }
+      switch ($fid)
+      {
+        case 1:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->lowWatermarkId);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 2:
+          if ($ftype == TType::I64) {
+            $xfer += $input->readI64($this->highWatermarkId);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 3:
+          if ($ftype == TType::BOOL) {
+            $xfer += $input->readBool($this->areIdsValid);
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        case 4:
+          if ($ftype == TType::LST) {
+            $this->ids = array();
+            $_size562 = 0;
+            $_etype565 = 0;
+            $xfer += $input->readListBegin($_etype565, $_size562);
+            for ($_i566 = 0; $_i566 < $_size562; ++$_i566)
+            {
+              $elem567 = null;
+              $xfer += $input->readI64($elem567);
+              $this->ids []= $elem567;
+            }
+            $xfer += $input->readListEnd();
+          } else {
+            $xfer += $input->skip($ftype);
+          }
+          break;
+        default:
+          $xfer += $input->skip($ftype);
+          break;
+      }
+      $xfer += $input->readFieldEnd();
+    }
+    $xfer += $input->readStructEnd();
+    return $xfer;
+  }
+
+  public function write($output) {
+    $xfer = 0;
+    $xfer += $output->writeStructBegin('GetValidWriteIdsResult');
+    if ($this->lowWatermarkId !== null) {
+      $xfer += $output->writeFieldBegin('lowWatermarkId', TType::I64, 1);
+      $xfer += $output->writeI64($this->lowWatermarkId);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->highWatermarkId !== null) {
+      $xfer += $output->writeFieldBegin('highWatermarkId', TType::I64, 2);
+      $xfer += $output->writeI64($this->highWatermarkId);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->areIdsValid !== null) {
+      $xfer += $output->writeFieldBegin('areIdsValid', TType::BOOL, 3);
+      $xfer += $output->writeBool($this->areIdsValid);
+      $xfer += $output->writeFieldEnd();
+    }
+    if ($this->ids !== null) {
+      if (!is_array($this->ids)) {
+        throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+      }
+      $xfer += $output->writeFieldBegin('ids', TType::LST, 4);
+      {
+        $output->writeListBegin(TType::I64, count($this->ids));
+        {
+          foreach ($this->ids as $iter568)
+          {
+            $xfer += $output->writeI64($iter568);
+          }
+        }
+        $output->writeListEnd();
+      }
+      $xfer += $output->writeFieldEnd();
+    }
+    $xfer += $output->writeFieldStop();
+    $xfer += $output->writeStructEnd();
+    return $xfer;
+  }
+
+}
+
 class GetAllFunctionsResponse {
   static $_TSPEC;
 
@@ -18029,15 +18297,15 @@ public function read($input)
         case 1:
           if ($ftype == TType::LST) {
             $this->functions = array();
-            $_size562 = 0;
-            $_etype565 = 0;
-            $xfer += $input->readListBegin($_etype565, $_size562);
-            for ($_i566 = 0; $_i566 < $_size562; ++$_i566)
+            $_size569 = 0;
+            $_etype572 = 0;
+            $xfer += $input->readListBegin($_etype572, $_size569);
+            for ($_i573 = 0; $_i573 < $_size569; ++$_i573)
             {
-              $elem567 = null;
-              $elem567 = new \metastore\Function();
-              $xfer += $elem567->read($input);
-              $this->functions []= $elem567;
+              $elem574 = null;
+              $elem574 = new \metastore\Function();
+              $xfer += $elem574->read($input);
+              $this->functions []= $elem574;
             }
             $xfer += $input->readListEnd();
           } else {
@@ -18065,9 +18333,9 @@ public function write($output) {
       {
         $output->writeListBegin(TType::STRUCT, count($this->functions));
         {
-          foreach ($this->functions as $iter568)
+          foreach ($this->functions as $iter575)
          {
-            $xfer += $iter568->write($output);
+            $xfer += $iter575->write($output);
          }
        }
        $output->writeListEnd();
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 13be322fc008..70fbc088e8b7 100755
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@ -176,6 +176,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
   print('  GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req)')
   print('  FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req)')
   print('  HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req)')
+  print('  GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req)')
   print('  string getName()')
   print('  string getVersion()')
   print('  fb_status getStatus()')
@@ -1157,6 +1158,12 @@ elif cmd == 'heartbeat_write_id':
     sys.exit(1)
   pp.pprint(client.heartbeat_write_id(eval(args[0]),))
 
+elif cmd == 'get_valid_write_ids':
+  if len(args) != 1:
+    print('get_valid_write_ids requires 1 args')
+    sys.exit(1)
+  pp.pprint(client.get_valid_write_ids(eval(args[0]),))
+
 elif cmd == 'getName':
   if len(args) != 0:
     print('getName requires 0 args')
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 137764878219..22c0cc6f9276 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@ -1221,6 +1221,13 @@ def heartbeat_write_id(self, req):
     """
     pass
 
+  def get_valid_write_ids(self, req):
+    """
+    Parameters:
+     - req
+    """
+    pass
+
 
 class Client(fb303.FacebookService.Client, Iface):
   """
@@ -6710,6 +6717,37 @@ def recv_heartbeat_write_id(self):
       return result.success
     raise TApplicationException(TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result")
 
+  def get_valid_write_ids(self, req):
+    """
+    Parameters:
+     - req
+    """
+    self.send_get_valid_write_ids(req)
+    return self.recv_get_valid_write_ids()
+
+  def send_get_valid_write_ids(self, req):
+    self._oprot.writeMessageBegin('get_valid_write_ids', TMessageType.CALL, self._seqid)
+    args = get_valid_write_ids_args()
+    args.req = req
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_valid_write_ids(self):
+    iprot = self._iprot
+    (fname, mtype, rseqid) = iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(iprot)
+      iprot.readMessageEnd()
+      raise x
+    result = get_valid_write_ids_result()
+    result.read(iprot)
+    iprot.readMessageEnd()
+    if result.success is not None:
+      return result.success
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result")
+
 
 class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
   def __init__(self, handler):
@@ -6866,6 +6904,7 @@ def __init__(self, handler):
     self._processMap["get_next_write_id"] = Processor.process_get_next_write_id
     self._processMap["finalize_write_id"] = Processor.process_finalize_write_id
     self._processMap["heartbeat_write_id"] = Processor.process_heartbeat_write_id
+    self._processMap["get_valid_write_ids"] = Processor.process_get_valid_write_ids
 
   def process(self, iprot, oprot):
     (name, type, seqid) = iprot.readMessageBegin()
@@ -10586,6 +10625,25 @@ def process_heartbeat_write_id(self, seqid, iprot, oprot):
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_get_valid_write_ids(self, seqid, iprot, oprot):
+    args = get_valid_write_ids_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_valid_write_ids_result()
+    try:
+      result.success = self._handler.get_valid_write_ids(args.req)
+      msg_type = TMessageType.REPLY
+    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
+      raise
+    except Exception as ex:
+      msg_type = TMessageType.EXCEPTION
+      logging.exception(ex)
+      result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
+    oprot.writeMessageBegin("get_valid_write_ids", msg_type, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
 
 # HELPER FUNCTIONS AND STRUCTURES
 
@@ -11472,10 +11530,10 @@ def read(self, iprot):
       if fid == 0:
         if ftype == TType.LIST:
          self.success
= [] - (_etype569, _size566) = iprot.readListBegin() - for _i570 in xrange(_size566): - _elem571 = iprot.readString() - self.success.append(_elem571) + (_etype576, _size573) = iprot.readListBegin() + for _i577 in xrange(_size573): + _elem578 = iprot.readString() + self.success.append(_elem578) iprot.readListEnd() else: iprot.skip(ftype) @@ -11498,8 +11556,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter572 in self.success: - oprot.writeString(iter572) + for iter579 in self.success: + oprot.writeString(iter579) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11604,10 +11662,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype576, _size573) = iprot.readListBegin() - for _i577 in xrange(_size573): - _elem578 = iprot.readString() - self.success.append(_elem578) + (_etype583, _size580) = iprot.readListBegin() + for _i584 in xrange(_size580): + _elem585 = iprot.readString() + self.success.append(_elem585) iprot.readListEnd() else: iprot.skip(ftype) @@ -11630,8 +11688,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter579 in self.success: - oprot.writeString(iter579) + for iter586 in self.success: + oprot.writeString(iter586) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12401,12 +12459,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype581, _vtype582, _size580 ) = iprot.readMapBegin() - for _i584 in xrange(_size580): - _key585 = iprot.readString() - _val586 = Type() - _val586.read(iprot) - self.success[_key585] = _val586 + (_ktype588, _vtype589, _size587 ) = iprot.readMapBegin() + for _i591 in xrange(_size587): + _key592 = iprot.readString() + _val593 = Type() + _val593.read(iprot) + self.success[_key592] = _val593 iprot.readMapEnd() else: iprot.skip(ftype) @@ -12429,9 +12487,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter587,viter588 in self.success.items(): - oprot.writeString(kiter587) - viter588.write(oprot) + for kiter594,viter595 in self.success.items(): + oprot.writeString(kiter594) + viter595.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -12574,11 +12632,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype592, _size589) = iprot.readListBegin() - for _i593 in xrange(_size589): - _elem594 = FieldSchema() - _elem594.read(iprot) - self.success.append(_elem594) + (_etype599, _size596) = iprot.readListBegin() + for _i600 in xrange(_size596): + _elem601 = FieldSchema() + _elem601.read(iprot) + self.success.append(_elem601) iprot.readListEnd() else: iprot.skip(ftype) @@ -12613,8 +12671,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter595 in self.success: - iter595.write(oprot) + for iter602 in self.success: + iter602.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12763,199 +12821,6 @@ class get_fields_with_environment_context_result: (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3 ) - def __init__(self, success=None, o1=None, 
o2=None, o3=None,): - self.success = success - self.o1 = o1 - self.o2 = o2 - self.o3 = o3 - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if fid == 0: - if ftype == TType.LIST: - self.success = [] - (_etype599, _size596) = iprot.readListBegin() - for _i600 in xrange(_size596): - _elem601 = FieldSchema() - _elem601.read(iprot) - self.success.append(_elem601) - iprot.readListEnd() - else: - iprot.skip(ftype) - elif fid == 1: - if ftype == TType.STRUCT: - self.o1 = MetaException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = UnknownTableException() - self.o2.read(iprot) - else: - iprot.skip(ftype) - elif fid == 3: - if ftype == TType.STRUCT: - self.o3 = UnknownDBException() - self.o3.read(iprot) - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_fields_with_environment_context_result') - if self.success is not None: - oprot.writeFieldBegin('success', TType.LIST, 0) - oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter602 in self.success: - iter602.write(oprot) - oprot.writeListEnd() - oprot.writeFieldEnd() - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() - if self.o3 is not None: - oprot.writeFieldBegin('o3', TType.STRUCT, 3) - self.o3.write(oprot) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.success) - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) - value = (value * 31) ^ hash(self.o3) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_schema_args: - """ - Attributes: - - db_name - - table_name - """ - - thrift_spec = ( - None, # 0 - (1, TType.STRING, 'db_name', None, None, ), # 1 - (2, TType.STRING, 'table_name', None, None, ), # 2 - ) - - def __init__(self, db_name=None, table_name=None,): - self.db_name = db_name - self.table_name = table_name - - def read(self, iprot): - if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: - fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) - return - iprot.readStructBegin() - while True: - (fname, ftype, fid) = iprot.readFieldBegin() - if ftype == TType.STOP: - break - if 
fid == 1: - if ftype == TType.STRING: - self.db_name = iprot.readString() - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRING: - self.table_name = iprot.readString() - else: - iprot.skip(ftype) - else: - iprot.skip(ftype) - iprot.readFieldEnd() - iprot.readStructEnd() - - def write(self, oprot): - if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: - oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) - return - oprot.writeStructBegin('get_schema_args') - if self.db_name is not None: - oprot.writeFieldBegin('db_name', TType.STRING, 1) - oprot.writeString(self.db_name) - oprot.writeFieldEnd() - if self.table_name is not None: - oprot.writeFieldBegin('table_name', TType.STRING, 2) - oprot.writeString(self.table_name) - oprot.writeFieldEnd() - oprot.writeFieldStop() - oprot.writeStructEnd() - - def validate(self): - return - - - def __hash__(self): - value = 17 - value = (value * 31) ^ hash(self.db_name) - value = (value * 31) ^ hash(self.table_name) - return value - - def __repr__(self): - L = ['%s=%r' % (key, value) - for key, value in self.__dict__.iteritems()] - return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ - - def __ne__(self, other): - return not (self == other) - -class get_schema_result: - """ - Attributes: - - success - - o1 - - o2 - - o3 - """ - - thrift_spec = ( - (0, TType.LIST, 'success', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 0 - (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (UnknownTableException, UnknownTableException.thrift_spec), None, ), # 2 - (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3 - ) - def __init__(self, success=None, o1=None, o2=None, o3=None,): self.success = success self.o1 = o1 @@ -13009,7 +12874,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_schema_result') + oprot.writeStructBegin('get_fields_with_environment_context_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) @@ -13055,6 +12920,199 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_schema_args: + """ + Attributes: + - db_name + - table_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'table_name', None, None, ), # 2 + ) + + def __init__(self, db_name=None, table_name=None,): + self.db_name = db_name + self.table_name = table_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + 
self.table_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_schema_args') + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeString(self.table_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.db_name) + value = (value * 31) ^ hash(self.table_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_schema_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (UnknownTableException, UnknownTableException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (UnknownDBException, UnknownDBException.thrift_spec), None, ), # 3 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype613, _size610) = iprot.readListBegin() + for _i614 in xrange(_size610): + _elem615 = FieldSchema() + _elem615.read(iprot) + self.success.append(_elem615) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = UnknownTableException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = UnknownDBException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_schema_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter616 in self.success: + iter616.write(oprot) + 
oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + value = (value * 31) ^ hash(self.o3) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class get_schema_with_environment_context_args: """ Attributes: @@ -13181,11 +13239,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype613, _size610) = iprot.readListBegin() - for _i614 in xrange(_size610): - _elem615 = FieldSchema() - _elem615.read(iprot) - self.success.append(_elem615) + (_etype620, _size617) = iprot.readListBegin() + for _i621 in xrange(_size617): + _elem622 = FieldSchema() + _elem622.read(iprot) + self.success.append(_elem622) iprot.readListEnd() else: iprot.skip(ftype) @@ -13220,8 +13278,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter616 in self.success: - iter616.write(oprot) + for iter623 in self.success: + iter623.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13662,22 +13720,22 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype620, _size617) = iprot.readListBegin() - for _i621 in xrange(_size617): - _elem622 = SQLPrimaryKey() - _elem622.read(iprot) - self.primaryKeys.append(_elem622) + (_etype627, _size624) = iprot.readListBegin() + for _i628 in xrange(_size624): + _elem629 = SQLPrimaryKey() + _elem629.read(iprot) + self.primaryKeys.append(_elem629) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = SQLForeignKey() - _elem628.read(iprot) - self.foreignKeys.append(_elem628) + (_etype633, _size630) = iprot.readListBegin() + for _i634 in xrange(_size630): + _elem635 = SQLForeignKey() + _elem635.read(iprot) + self.foreignKeys.append(_elem635) iprot.readListEnd() else: iprot.skip(ftype) @@ -13698,15 +13756,15 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter629 in self.primaryKeys: - iter629.write(oprot) + for iter636 in self.primaryKeys: + iter636.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter630 in self.foreignKeys: - iter630.write(oprot) + for iter637 in self.foreignKeys: + iter637.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14742,10 
+14800,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype634, _size631) = iprot.readListBegin() - for _i635 in xrange(_size631): - _elem636 = iprot.readString() - self.success.append(_elem636) + (_etype641, _size638) = iprot.readListBegin() + for _i642 in xrange(_size638): + _elem643 = iprot.readString() + self.success.append(_elem643) iprot.readListEnd() else: iprot.skip(ftype) @@ -14768,8 +14826,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter637 in self.success: - oprot.writeString(iter637) + for iter644 in self.success: + oprot.writeString(iter644) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14842,10 +14900,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype641, _size638) = iprot.readListBegin() - for _i642 in xrange(_size638): - _elem643 = iprot.readString() - self.tbl_types.append(_elem643) + (_etype648, _size645) = iprot.readListBegin() + for _i649 in xrange(_size645): + _elem650 = iprot.readString() + self.tbl_types.append(_elem650) iprot.readListEnd() else: iprot.skip(ftype) @@ -14870,8 +14928,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter644 in self.tbl_types: - oprot.writeString(iter644) + for iter651 in self.tbl_types: + oprot.writeString(iter651) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14927,11 +14985,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype648, _size645) = iprot.readListBegin() - for _i649 in xrange(_size645): - _elem650 = TableMeta() - _elem650.read(iprot) - self.success.append(_elem650) + (_etype655, _size652) = iprot.readListBegin() + for _i656 in xrange(_size652): + _elem657 = TableMeta() + _elem657.read(iprot) + self.success.append(_elem657) iprot.readListEnd() else: iprot.skip(ftype) @@ -14954,8 +15012,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter651 in self.success: - iter651.write(oprot) + for iter658 in self.success: + iter658.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15079,10 +15137,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype655, _size652) = iprot.readListBegin() - for _i656 in xrange(_size652): - _elem657 = iprot.readString() - self.success.append(_elem657) + (_etype662, _size659) = iprot.readListBegin() + for _i663 in xrange(_size659): + _elem664 = iprot.readString() + self.success.append(_elem664) iprot.readListEnd() else: iprot.skip(ftype) @@ -15105,8 +15163,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter658 in self.success: - oprot.writeString(iter658) + for iter665 in self.success: + oprot.writeString(iter665) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15342,10 +15400,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype662, _size659) = iprot.readListBegin() - for _i663 in xrange(_size659): - _elem664 = iprot.readString() - self.tbl_names.append(_elem664) + (_etype669, _size666) = iprot.readListBegin() + for _i670 
in xrange(_size666): + _elem671 = iprot.readString() + self.tbl_names.append(_elem671) iprot.readListEnd() else: iprot.skip(ftype) @@ -15366,8 +15424,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter665 in self.tbl_names: - oprot.writeString(iter665) + for iter672 in self.tbl_names: + oprot.writeString(iter672) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15428,11 +15486,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype669, _size666) = iprot.readListBegin() - for _i670 in xrange(_size666): - _elem671 = Table() - _elem671.read(iprot) - self.success.append(_elem671) + (_etype676, _size673) = iprot.readListBegin() + for _i677 in xrange(_size673): + _elem678 = Table() + _elem678.read(iprot) + self.success.append(_elem678) iprot.readListEnd() else: iprot.skip(ftype) @@ -15467,8 +15525,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter672 in self.success: - iter672.write(oprot) + for iter679 in self.success: + iter679.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15634,10 +15692,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype676, _size673) = iprot.readListBegin() - for _i677 in xrange(_size673): - _elem678 = iprot.readString() - self.success.append(_elem678) + (_etype683, _size680) = iprot.readListBegin() + for _i684 in xrange(_size680): + _elem685 = iprot.readString() + self.success.append(_elem685) iprot.readListEnd() else: iprot.skip(ftype) @@ -15672,8 +15730,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter679 in self.success: - oprot.writeString(iter679) + for iter686 in self.success: + oprot.writeString(iter686) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16643,11 +16701,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype683, _size680) = iprot.readListBegin() - for _i684 in xrange(_size680): - _elem685 = Partition() - _elem685.read(iprot) - self.new_parts.append(_elem685) + (_etype690, _size687) = iprot.readListBegin() + for _i691 in xrange(_size687): + _elem692 = Partition() + _elem692.read(iprot) + self.new_parts.append(_elem692) iprot.readListEnd() else: iprot.skip(ftype) @@ -16664,8 +16722,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter686 in self.new_parts: - iter686.write(oprot) + for iter693 in self.new_parts: + iter693.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16823,11 +16881,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype690, _size687) = iprot.readListBegin() - for _i691 in xrange(_size687): - _elem692 = PartitionSpec() - _elem692.read(iprot) - self.new_parts.append(_elem692) + (_etype697, _size694) = iprot.readListBegin() + for _i698 in xrange(_size694): + _elem699 = PartitionSpec() + _elem699.read(iprot) + self.new_parts.append(_elem699) iprot.readListEnd() else: iprot.skip(ftype) @@ -16844,8 +16902,8 @@ def write(self, oprot): if self.new_parts is not None: 
oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter693 in self.new_parts: - iter693.write(oprot) + for iter700 in self.new_parts: + iter700.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17019,10 +17077,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype697, _size694) = iprot.readListBegin() - for _i698 in xrange(_size694): - _elem699 = iprot.readString() - self.part_vals.append(_elem699) + (_etype704, _size701) = iprot.readListBegin() + for _i705 in xrange(_size701): + _elem706 = iprot.readString() + self.part_vals.append(_elem706) iprot.readListEnd() else: iprot.skip(ftype) @@ -17047,8 +17105,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter700 in self.part_vals: - oprot.writeString(iter700) + for iter707 in self.part_vals: + oprot.writeString(iter707) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17401,10 +17459,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype704, _size701) = iprot.readListBegin() - for _i705 in xrange(_size701): - _elem706 = iprot.readString() - self.part_vals.append(_elem706) + (_etype711, _size708) = iprot.readListBegin() + for _i712 in xrange(_size708): + _elem713 = iprot.readString() + self.part_vals.append(_elem713) iprot.readListEnd() else: iprot.skip(ftype) @@ -17435,8 +17493,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter707 in self.part_vals: - oprot.writeString(iter707) + for iter714 in self.part_vals: + oprot.writeString(iter714) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -18031,10 +18089,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype711, _size708) = iprot.readListBegin() - for _i712 in xrange(_size708): - _elem713 = iprot.readString() - self.part_vals.append(_elem713) + (_etype718, _size715) = iprot.readListBegin() + for _i719 in xrange(_size715): + _elem720 = iprot.readString() + self.part_vals.append(_elem720) iprot.readListEnd() else: iprot.skip(ftype) @@ -18064,8 +18122,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter714 in self.part_vals: - oprot.writeString(iter714) + for iter721 in self.part_vals: + oprot.writeString(iter721) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -18238,10 +18296,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype718, _size715) = iprot.readListBegin() - for _i719 in xrange(_size715): - _elem720 = iprot.readString() - self.part_vals.append(_elem720) + (_etype725, _size722) = iprot.readListBegin() + for _i726 in xrange(_size722): + _elem727 = iprot.readString() + self.part_vals.append(_elem727) iprot.readListEnd() else: iprot.skip(ftype) @@ -18277,8 +18335,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter721 in self.part_vals: - oprot.writeString(iter721) + for iter728 in self.part_vals: + oprot.writeString(iter728) oprot.writeListEnd() 
oprot.writeFieldEnd() if self.deleteData is not None: @@ -19015,10 +19073,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype725, _size722) = iprot.readListBegin() - for _i726 in xrange(_size722): - _elem727 = iprot.readString() - self.part_vals.append(_elem727) + (_etype732, _size729) = iprot.readListBegin() + for _i733 in xrange(_size729): + _elem734 = iprot.readString() + self.part_vals.append(_elem734) iprot.readListEnd() else: iprot.skip(ftype) @@ -19043,8 +19101,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter728 in self.part_vals: - oprot.writeString(iter728) + for iter735 in self.part_vals: + oprot.writeString(iter735) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19203,11 +19261,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype730, _vtype731, _size729 ) = iprot.readMapBegin() - for _i733 in xrange(_size729): - _key734 = iprot.readString() - _val735 = iprot.readString() - self.partitionSpecs[_key734] = _val735 + (_ktype737, _vtype738, _size736 ) = iprot.readMapBegin() + for _i740 in xrange(_size736): + _key741 = iprot.readString() + _val742 = iprot.readString() + self.partitionSpecs[_key741] = _val742 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19244,9 +19302,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter736,viter737 in self.partitionSpecs.items(): - oprot.writeString(kiter736) - oprot.writeString(viter737) + for kiter743,viter744 in self.partitionSpecs.items(): + oprot.writeString(kiter743) + oprot.writeString(viter744) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -19451,11 +19509,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype739, _vtype740, _size738 ) = iprot.readMapBegin() - for _i742 in xrange(_size738): - _key743 = iprot.readString() - _val744 = iprot.readString() - self.partitionSpecs[_key743] = _val744 + (_ktype746, _vtype747, _size745 ) = iprot.readMapBegin() + for _i749 in xrange(_size745): + _key750 = iprot.readString() + _val751 = iprot.readString() + self.partitionSpecs[_key750] = _val751 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19492,9 +19550,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter745,viter746 in self.partitionSpecs.items(): - oprot.writeString(kiter745) - oprot.writeString(viter746) + for kiter752,viter753 in self.partitionSpecs.items(): + oprot.writeString(kiter752) + oprot.writeString(viter753) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -19577,11 +19635,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype750, _size747) = iprot.readListBegin() - for _i751 in xrange(_size747): - _elem752 = Partition() - _elem752.read(iprot) - self.success.append(_elem752) + (_etype757, _size754) = iprot.readListBegin() + for _i758 in xrange(_size754): + _elem759 = Partition() + _elem759.read(iprot) + self.success.append(_elem759) iprot.readListEnd() else: iprot.skip(ftype) @@ -19622,8 +19680,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter753 in self.success: - iter753.write(oprot) + for iter760 in self.success: + iter760.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19717,10 +19775,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype757, _size754) = iprot.readListBegin() - for _i758 in xrange(_size754): - _elem759 = iprot.readString() - self.part_vals.append(_elem759) + (_etype764, _size761) = iprot.readListBegin() + for _i765 in xrange(_size761): + _elem766 = iprot.readString() + self.part_vals.append(_elem766) iprot.readListEnd() else: iprot.skip(ftype) @@ -19732,10 +19790,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype763, _size760) = iprot.readListBegin() - for _i764 in xrange(_size760): - _elem765 = iprot.readString() - self.group_names.append(_elem765) + (_etype770, _size767) = iprot.readListBegin() + for _i771 in xrange(_size767): + _elem772 = iprot.readString() + self.group_names.append(_elem772) iprot.readListEnd() else: iprot.skip(ftype) @@ -19760,8 +19818,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter766 in self.part_vals: - oprot.writeString(iter766) + for iter773 in self.part_vals: + oprot.writeString(iter773) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -19771,8 +19829,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter767 in self.group_names: - oprot.writeString(iter767) + for iter774 in self.group_names: + oprot.writeString(iter774) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20201,11 +20259,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype771, _size768) = iprot.readListBegin() - for _i772 in xrange(_size768): - _elem773 = Partition() - _elem773.read(iprot) - self.success.append(_elem773) + (_etype778, _size775) = iprot.readListBegin() + for _i779 in xrange(_size775): + _elem780 = Partition() + _elem780.read(iprot) + self.success.append(_elem780) iprot.readListEnd() else: iprot.skip(ftype) @@ -20234,8 +20292,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter774 in self.success: - iter774.write(oprot) + for iter781 in self.success: + iter781.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20329,10 +20387,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype778, _size775) = iprot.readListBegin() - for _i779 in xrange(_size775): - _elem780 = iprot.readString() - self.group_names.append(_elem780) + (_etype785, _size782) = iprot.readListBegin() + for _i786 in xrange(_size782): + _elem787 = iprot.readString() + self.group_names.append(_elem787) iprot.readListEnd() else: iprot.skip(ftype) @@ -20365,8 +20423,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter781 in self.group_names: - oprot.writeString(iter781) + for iter788 in self.group_names: + oprot.writeString(iter788) 
oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20427,11 +20485,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype785, _size782) = iprot.readListBegin() - for _i786 in xrange(_size782): - _elem787 = Partition() - _elem787.read(iprot) - self.success.append(_elem787) + (_etype792, _size789) = iprot.readListBegin() + for _i793 in xrange(_size789): + _elem794 = Partition() + _elem794.read(iprot) + self.success.append(_elem794) iprot.readListEnd() else: iprot.skip(ftype) @@ -20460,8 +20518,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter788 in self.success: - iter788.write(oprot) + for iter795 in self.success: + iter795.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20619,11 +20677,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype792, _size789) = iprot.readListBegin() - for _i793 in xrange(_size789): - _elem794 = PartitionSpec() - _elem794.read(iprot) - self.success.append(_elem794) + (_etype799, _size796) = iprot.readListBegin() + for _i800 in xrange(_size796): + _elem801 = PartitionSpec() + _elem801.read(iprot) + self.success.append(_elem801) iprot.readListEnd() else: iprot.skip(ftype) @@ -20652,8 +20710,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter795 in self.success: - iter795.write(oprot) + for iter802 in self.success: + iter802.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20808,10 +20866,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype799, _size796) = iprot.readListBegin() - for _i800 in xrange(_size796): - _elem801 = iprot.readString() - self.success.append(_elem801) + (_etype806, _size803) = iprot.readListBegin() + for _i807 in xrange(_size803): + _elem808 = iprot.readString() + self.success.append(_elem808) iprot.readListEnd() else: iprot.skip(ftype) @@ -20834,8 +20892,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter802 in self.success: - oprot.writeString(iter802) + for iter809 in self.success: + oprot.writeString(iter809) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -20911,10 +20969,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype806, _size803) = iprot.readListBegin() - for _i807 in xrange(_size803): - _elem808 = iprot.readString() - self.part_vals.append(_elem808) + (_etype813, _size810) = iprot.readListBegin() + for _i814 in xrange(_size810): + _elem815 = iprot.readString() + self.part_vals.append(_elem815) iprot.readListEnd() else: iprot.skip(ftype) @@ -20944,8 +21002,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter809 in self.part_vals: - oprot.writeString(iter809) + for iter816 in self.part_vals: + oprot.writeString(iter816) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21009,11 +21067,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype813, _size810) = iprot.readListBegin() - for _i814 in xrange(_size810): - _elem815 = 
Partition() - _elem815.read(iprot) - self.success.append(_elem815) + (_etype820, _size817) = iprot.readListBegin() + for _i821 in xrange(_size817): + _elem822 = Partition() + _elem822.read(iprot) + self.success.append(_elem822) iprot.readListEnd() else: iprot.skip(ftype) @@ -21042,8 +21100,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter816 in self.success: - iter816.write(oprot) + for iter823 in self.success: + iter823.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21130,10 +21188,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype820, _size817) = iprot.readListBegin() - for _i821 in xrange(_size817): - _elem822 = iprot.readString() - self.part_vals.append(_elem822) + (_etype827, _size824) = iprot.readListBegin() + for _i828 in xrange(_size824): + _elem829 = iprot.readString() + self.part_vals.append(_elem829) iprot.readListEnd() else: iprot.skip(ftype) @@ -21150,10 +21208,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype826, _size823) = iprot.readListBegin() - for _i827 in xrange(_size823): - _elem828 = iprot.readString() - self.group_names.append(_elem828) + (_etype833, _size830) = iprot.readListBegin() + for _i834 in xrange(_size830): + _elem835 = iprot.readString() + self.group_names.append(_elem835) iprot.readListEnd() else: iprot.skip(ftype) @@ -21178,8 +21236,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter829 in self.part_vals: - oprot.writeString(iter829) + for iter836 in self.part_vals: + oprot.writeString(iter836) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21193,8 +21251,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter830 in self.group_names: - oprot.writeString(iter830) + for iter837 in self.group_names: + oprot.writeString(iter837) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21256,11 +21314,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype834, _size831) = iprot.readListBegin() - for _i835 in xrange(_size831): - _elem836 = Partition() - _elem836.read(iprot) - self.success.append(_elem836) + (_etype841, _size838) = iprot.readListBegin() + for _i842 in xrange(_size838): + _elem843 = Partition() + _elem843.read(iprot) + self.success.append(_elem843) iprot.readListEnd() else: iprot.skip(ftype) @@ -21289,8 +21347,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter837 in self.success: - iter837.write(oprot) + for iter844 in self.success: + iter844.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21371,10 +21429,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype841, _size838) = iprot.readListBegin() - for _i842 in xrange(_size838): - _elem843 = iprot.readString() - self.part_vals.append(_elem843) + (_etype848, _size845) = iprot.readListBegin() + for _i849 in xrange(_size845): + _elem850 = iprot.readString() + self.part_vals.append(_elem850) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -21404,8 +21462,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter844 in self.part_vals: - oprot.writeString(iter844) + for iter851 in self.part_vals: + oprot.writeString(iter851) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21469,10 +21527,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype848, _size845) = iprot.readListBegin() - for _i849 in xrange(_size845): - _elem850 = iprot.readString() - self.success.append(_elem850) + (_etype855, _size852) = iprot.readListBegin() + for _i856 in xrange(_size852): + _elem857 = iprot.readString() + self.success.append(_elem857) iprot.readListEnd() else: iprot.skip(ftype) @@ -21501,8 +21559,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter851 in self.success: - oprot.writeString(iter851) + for iter858 in self.success: + oprot.writeString(iter858) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21673,11 +21731,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype855, _size852) = iprot.readListBegin() - for _i856 in xrange(_size852): - _elem857 = Partition() - _elem857.read(iprot) - self.success.append(_elem857) + (_etype862, _size859) = iprot.readListBegin() + for _i863 in xrange(_size859): + _elem864 = Partition() + _elem864.read(iprot) + self.success.append(_elem864) iprot.readListEnd() else: iprot.skip(ftype) @@ -21706,8 +21764,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter858 in self.success: - iter858.write(oprot) + for iter865 in self.success: + iter865.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21878,11 +21936,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype862, _size859) = iprot.readListBegin() - for _i863 in xrange(_size859): - _elem864 = PartitionSpec() - _elem864.read(iprot) - self.success.append(_elem864) + (_etype869, _size866) = iprot.readListBegin() + for _i870 in xrange(_size866): + _elem871 = PartitionSpec() + _elem871.read(iprot) + self.success.append(_elem871) iprot.readListEnd() else: iprot.skip(ftype) @@ -21911,8 +21969,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter865 in self.success: - iter865.write(oprot) + for iter872 in self.success: + iter872.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22332,10 +22390,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype869, _size866) = iprot.readListBegin() - for _i870 in xrange(_size866): - _elem871 = iprot.readString() - self.names.append(_elem871) + (_etype876, _size873) = iprot.readListBegin() + for _i877 in xrange(_size873): + _elem878 = iprot.readString() + self.names.append(_elem878) iprot.readListEnd() else: iprot.skip(ftype) @@ -22360,8 +22418,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter872 in self.names: - oprot.writeString(iter872) + for iter879 in self.names: 
+ oprot.writeString(iter879) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22420,11 +22478,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype876, _size873) = iprot.readListBegin() - for _i877 in xrange(_size873): - _elem878 = Partition() - _elem878.read(iprot) - self.success.append(_elem878) + (_etype883, _size880) = iprot.readListBegin() + for _i884 in xrange(_size880): + _elem885 = Partition() + _elem885.read(iprot) + self.success.append(_elem885) iprot.readListEnd() else: iprot.skip(ftype) @@ -22453,8 +22511,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter879 in self.success: - iter879.write(oprot) + for iter886 in self.success: + iter886.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22704,11 +22762,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype883, _size880) = iprot.readListBegin() - for _i884 in xrange(_size880): - _elem885 = Partition() - _elem885.read(iprot) - self.new_parts.append(_elem885) + (_etype890, _size887) = iprot.readListBegin() + for _i891 in xrange(_size887): + _elem892 = Partition() + _elem892.read(iprot) + self.new_parts.append(_elem892) iprot.readListEnd() else: iprot.skip(ftype) @@ -22733,8 +22791,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter886 in self.new_parts: - iter886.write(oprot) + for iter893 in self.new_parts: + iter893.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22887,11 +22945,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype890, _size887) = iprot.readListBegin() - for _i891 in xrange(_size887): - _elem892 = Partition() - _elem892.read(iprot) - self.new_parts.append(_elem892) + (_etype897, _size894) = iprot.readListBegin() + for _i898 in xrange(_size894): + _elem899 = Partition() + _elem899.read(iprot) + self.new_parts.append(_elem899) iprot.readListEnd() else: iprot.skip(ftype) @@ -22922,8 +22980,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter893 in self.new_parts: - iter893.write(oprot) + for iter900 in self.new_parts: + iter900.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -23267,10 +23325,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype897, _size894) = iprot.readListBegin() - for _i898 in xrange(_size894): - _elem899 = iprot.readString() - self.part_vals.append(_elem899) + (_etype904, _size901) = iprot.readListBegin() + for _i905 in xrange(_size901): + _elem906 = iprot.readString() + self.part_vals.append(_elem906) iprot.readListEnd() else: iprot.skip(ftype) @@ -23301,8 +23359,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter900 in self.part_vals: - oprot.writeString(iter900) + for iter907 in self.part_vals: + oprot.writeString(iter907) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -23444,10 +23502,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - 
(_etype904, _size901) = iprot.readListBegin() - for _i905 in xrange(_size901): - _elem906 = iprot.readString() - self.part_vals.append(_elem906) + (_etype911, _size908) = iprot.readListBegin() + for _i912 in xrange(_size908): + _elem913 = iprot.readString() + self.part_vals.append(_elem913) iprot.readListEnd() else: iprot.skip(ftype) @@ -23469,8 +23527,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter907 in self.part_vals: - oprot.writeString(iter907) + for iter914 in self.part_vals: + oprot.writeString(iter914) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -23828,10 +23886,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype911, _size908) = iprot.readListBegin() - for _i912 in xrange(_size908): - _elem913 = iprot.readString() - self.success.append(_elem913) + (_etype918, _size915) = iprot.readListBegin() + for _i919 in xrange(_size915): + _elem920 = iprot.readString() + self.success.append(_elem920) iprot.readListEnd() else: iprot.skip(ftype) @@ -23854,8 +23912,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter914 in self.success: - oprot.writeString(iter914) + for iter921 in self.success: + oprot.writeString(iter921) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23979,11 +24037,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype916, _vtype917, _size915 ) = iprot.readMapBegin() - for _i919 in xrange(_size915): - _key920 = iprot.readString() - _val921 = iprot.readString() - self.success[_key920] = _val921 + (_ktype923, _vtype924, _size922 ) = iprot.readMapBegin() + for _i926 in xrange(_size922): + _key927 = iprot.readString() + _val928 = iprot.readString() + self.success[_key927] = _val928 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24006,9 +24064,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter922,viter923 in self.success.items(): - oprot.writeString(kiter922) - oprot.writeString(viter923) + for kiter929,viter930 in self.success.items(): + oprot.writeString(kiter929) + oprot.writeString(viter930) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24084,11 +24142,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype925, _vtype926, _size924 ) = iprot.readMapBegin() - for _i928 in xrange(_size924): - _key929 = iprot.readString() - _val930 = iprot.readString() - self.part_vals[_key929] = _val930 + (_ktype932, _vtype933, _size931 ) = iprot.readMapBegin() + for _i935 in xrange(_size931): + _key936 = iprot.readString() + _val937 = iprot.readString() + self.part_vals[_key936] = _val937 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24118,9 +24176,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter931,viter932 in self.part_vals.items(): - oprot.writeString(kiter931) - oprot.writeString(viter932) + for kiter938,viter939 in self.part_vals.items(): + oprot.writeString(kiter938) + oprot.writeString(viter939) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ 
-24334,11 +24392,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype934, _vtype935, _size933 ) = iprot.readMapBegin() - for _i937 in xrange(_size933): - _key938 = iprot.readString() - _val939 = iprot.readString() - self.part_vals[_key938] = _val939 + (_ktype941, _vtype942, _size940 ) = iprot.readMapBegin() + for _i944 in xrange(_size940): + _key945 = iprot.readString() + _val946 = iprot.readString() + self.part_vals[_key945] = _val946 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24368,9 +24426,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter940,viter941 in self.part_vals.items(): - oprot.writeString(kiter940) - oprot.writeString(viter941) + for kiter947,viter948 in self.part_vals.items(): + oprot.writeString(kiter947) + oprot.writeString(viter948) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -25425,11 +25483,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype945, _size942) = iprot.readListBegin() - for _i946 in xrange(_size942): - _elem947 = Index() - _elem947.read(iprot) - self.success.append(_elem947) + (_etype952, _size949) = iprot.readListBegin() + for _i953 in xrange(_size949): + _elem954 = Index() + _elem954.read(iprot) + self.success.append(_elem954) iprot.readListEnd() else: iprot.skip(ftype) @@ -25458,8 +25516,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter948 in self.success: - iter948.write(oprot) + for iter955 in self.success: + iter955.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25614,10 +25672,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype952, _size949) = iprot.readListBegin() - for _i953 in xrange(_size949): - _elem954 = iprot.readString() - self.success.append(_elem954) + (_etype959, _size956) = iprot.readListBegin() + for _i960 in xrange(_size956): + _elem961 = iprot.readString() + self.success.append(_elem961) iprot.readListEnd() else: iprot.skip(ftype) @@ -25640,8 +25698,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter955 in self.success: - oprot.writeString(iter955) + for iter962 in self.success: + oprot.writeString(iter962) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -28507,10 +28565,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype959, _size956) = iprot.readListBegin() - for _i960 in xrange(_size956): - _elem961 = iprot.readString() - self.success.append(_elem961) + (_etype966, _size963) = iprot.readListBegin() + for _i967 in xrange(_size963): + _elem968 = iprot.readString() + self.success.append(_elem968) iprot.readListEnd() else: iprot.skip(ftype) @@ -28533,8 +28591,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter962 in self.success: - oprot.writeString(iter962) + for iter969 in self.success: + oprot.writeString(iter969) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29222,10 +29280,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - 
(_etype966, _size963) = iprot.readListBegin() - for _i967 in xrange(_size963): - _elem968 = iprot.readString() - self.success.append(_elem968) + (_etype973, _size970) = iprot.readListBegin() + for _i974 in xrange(_size970): + _elem975 = iprot.readString() + self.success.append(_elem975) iprot.readListEnd() else: iprot.skip(ftype) @@ -29248,8 +29306,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter969 in self.success: - oprot.writeString(iter969) + for iter976 in self.success: + oprot.writeString(iter976) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29763,11 +29821,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype973, _size970) = iprot.readListBegin() - for _i974 in xrange(_size970): - _elem975 = Role() - _elem975.read(iprot) - self.success.append(_elem975) + (_etype980, _size977) = iprot.readListBegin() + for _i981 in xrange(_size977): + _elem982 = Role() + _elem982.read(iprot) + self.success.append(_elem982) iprot.readListEnd() else: iprot.skip(ftype) @@ -29790,8 +29848,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter976 in self.success: - iter976.write(oprot) + for iter983 in self.success: + iter983.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30300,10 +30358,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype980, _size977) = iprot.readListBegin() - for _i981 in xrange(_size977): - _elem982 = iprot.readString() - self.group_names.append(_elem982) + (_etype987, _size984) = iprot.readListBegin() + for _i988 in xrange(_size984): + _elem989 = iprot.readString() + self.group_names.append(_elem989) iprot.readListEnd() else: iprot.skip(ftype) @@ -30328,8 +30386,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter983 in self.group_names: - oprot.writeString(iter983) + for iter990 in self.group_names: + oprot.writeString(iter990) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30556,11 +30614,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype987, _size984) = iprot.readListBegin() - for _i988 in xrange(_size984): - _elem989 = HiveObjectPrivilege() - _elem989.read(iprot) - self.success.append(_elem989) + (_etype994, _size991) = iprot.readListBegin() + for _i995 in xrange(_size991): + _elem996 = HiveObjectPrivilege() + _elem996.read(iprot) + self.success.append(_elem996) iprot.readListEnd() else: iprot.skip(ftype) @@ -30583,8 +30641,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter990 in self.success: - iter990.write(oprot) + for iter997 in self.success: + iter997.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31082,10 +31140,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype994, _size991) = iprot.readListBegin() - for _i995 in xrange(_size991): - _elem996 = iprot.readString() - self.group_names.append(_elem996) + (_etype1001, _size998) = iprot.readListBegin() + for _i1002 in xrange(_size998): + _elem1003 = 
iprot.readString() + self.group_names.append(_elem1003) iprot.readListEnd() else: iprot.skip(ftype) @@ -31106,8 +31164,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter997 in self.group_names: - oprot.writeString(iter997) + for iter1004 in self.group_names: + oprot.writeString(iter1004) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31162,10 +31220,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1001, _size998) = iprot.readListBegin() - for _i1002 in xrange(_size998): - _elem1003 = iprot.readString() - self.success.append(_elem1003) + (_etype1008, _size1005) = iprot.readListBegin() + for _i1009 in xrange(_size1005): + _elem1010 = iprot.readString() + self.success.append(_elem1010) iprot.readListEnd() else: iprot.skip(ftype) @@ -31188,8 +31246,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1004 in self.success: - oprot.writeString(iter1004) + for iter1011 in self.success: + oprot.writeString(iter1011) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32121,10 +32179,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1008, _size1005) = iprot.readListBegin() - for _i1009 in xrange(_size1005): - _elem1010 = iprot.readString() - self.success.append(_elem1010) + (_etype1015, _size1012) = iprot.readListBegin() + for _i1016 in xrange(_size1012): + _elem1017 = iprot.readString() + self.success.append(_elem1017) iprot.readListEnd() else: iprot.skip(ftype) @@ -32141,8 +32199,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1011 in self.success: - oprot.writeString(iter1011) + for iter1018 in self.success: + oprot.writeString(iter1018) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -32669,10 +32727,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1015, _size1012) = iprot.readListBegin() - for _i1016 in xrange(_size1012): - _elem1017 = iprot.readString() - self.success.append(_elem1017) + (_etype1022, _size1019) = iprot.readListBegin() + for _i1023 in xrange(_size1019): + _elem1024 = iprot.readString() + self.success.append(_elem1024) iprot.readListEnd() else: iprot.skip(ftype) @@ -32689,8 +32747,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1018 in self.success: - oprot.writeString(iter1018) + for iter1025 in self.success: + oprot.writeString(iter1025) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -36265,6 +36323,137 @@ def validate(self): return + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_valid_write_ids_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', 
(GetValidWriteIdsRequest, GetValidWriteIdsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetValidWriteIdsRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_valid_write_ids_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_valid_write_ids_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (GetValidWriteIdsResult, GetValidWriteIdsResult.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = GetValidWriteIdsResult() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_valid_write_ids_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + def __hash__(self): value = 17 value = (value * 31) ^ hash(self.success) diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 8decc94d783c..53f24b981393 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -12667,6 +12667,204 @@ def __eq__(self, 
other): def __ne__(self, other): return not (self == other) +class GetValidWriteIdsRequest: + """ + Attributes: + - dbName + - tblName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + ) + + def __init__(self, dbName=None, tblName=None,): + self.dbName = dbName + self.tblName = tblName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetValidWriteIdsRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetValidWriteIdsResult: + """ + Attributes: + - lowWatermarkId + - highWatermarkId + - areIdsValid + - ids + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'lowWatermarkId', None, None, ), # 1 + (2, TType.I64, 'highWatermarkId', None, None, ), # 2 + (3, TType.BOOL, 'areIdsValid', None, None, ), # 3 + (4, TType.LIST, 'ids', (TType.I64,None), None, ), # 4 + ) + + def __init__(self, lowWatermarkId=None, highWatermarkId=None, areIdsValid=None, ids=None,): + self.lowWatermarkId = lowWatermarkId + self.highWatermarkId = highWatermarkId + self.areIdsValid = areIdsValid + self.ids = ids + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lowWatermarkId = iprot.readI64() + 
else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.highWatermarkId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.areIdsValid = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.ids = [] + (_etype562, _size559) = iprot.readListBegin() + for _i563 in xrange(_size559): + _elem564 = iprot.readI64() + self.ids.append(_elem564) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetValidWriteIdsResult') + if self.lowWatermarkId is not None: + oprot.writeFieldBegin('lowWatermarkId', TType.I64, 1) + oprot.writeI64(self.lowWatermarkId) + oprot.writeFieldEnd() + if self.highWatermarkId is not None: + oprot.writeFieldBegin('highWatermarkId', TType.I64, 2) + oprot.writeI64(self.highWatermarkId) + oprot.writeFieldEnd() + if self.areIdsValid is not None: + oprot.writeFieldBegin('areIdsValid', TType.BOOL, 3) + oprot.writeBool(self.areIdsValid) + oprot.writeFieldEnd() + if self.ids is not None: + oprot.writeFieldBegin('ids', TType.LIST, 4) + oprot.writeListBegin(TType.I64, len(self.ids)) + for iter565 in self.ids: + oprot.writeI64(iter565) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lowWatermarkId is None: + raise TProtocol.TProtocolException(message='Required field lowWatermarkId is unset!') + if self.highWatermarkId is None: + raise TProtocol.TProtocolException(message='Required field highWatermarkId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.lowWatermarkId) + value = (value * 31) ^ hash(self.highWatermarkId) + value = (value * 31) ^ hash(self.areIdsValid) + value = (value * 31) ^ hash(self.ids) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetAllFunctionsResponse: """ Attributes: @@ -12693,11 +12891,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype562, _size559) = iprot.readListBegin() - for _i563 in xrange(_size559): - _elem564 = Function() - _elem564.read(iprot) - self.functions.append(_elem564) + (_etype569, _size566) = iprot.readListBegin() + for _i570 in xrange(_size566): + _elem571 = Function() + _elem571.read(iprot) + self.functions.append(_elem571) iprot.readListEnd() else: iprot.skip(ftype) @@ -12714,8 +12912,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter565 in self.functions: - iter565.write(oprot) + for iter572 in self.functions: + iter572.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 95f20753bdbf..ca60ba494000 100644 --- 
a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2876,6 +2876,50 @@ def validate ::Thrift::Struct.generate_accessors self end +class GetValidWriteIdsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + end + + ::Thrift::Struct.generate_accessors self +end + +class GetValidWriteIdsResult + include ::Thrift::Struct, ::Thrift::Struct_Union + LOWWATERMARKID = 1 + HIGHWATERMARKID = 2 + AREIDSVALID = 3 + IDS = 4 + + FIELDS = { + LOWWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'lowWatermarkId'}, + HIGHWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'highWatermarkId'}, + AREIDSVALID => {:type => ::Thrift::Types::BOOL, :name => 'areIdsValid', :optional => true}, + IDS => {:type => ::Thrift::Types::LIST, :name => 'ids', :element => {:type => ::Thrift::Types::I64}, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lowWatermarkId is unset!') unless @lowWatermarkId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field highWatermarkId is unset!') unless @highWatermarkId + end + + ::Thrift::Struct.generate_accessors self +end + class GetAllFunctionsResponse include ::Thrift::Struct, ::Thrift::Struct_Union FUNCTIONS = 1 diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 403e07f13d1d..613702f0616d 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2529,6 +2529,21 @@ def recv_heartbeat_write_id() raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'heartbeat_write_id failed: unknown result') end + def get_valid_write_ids(req) + send_get_valid_write_ids(req) + return recv_get_valid_write_ids() + end + + def send_get_valid_write_ids(req) + send_message('get_valid_write_ids', Get_valid_write_ids_args, :req => req) + end + + def recv_get_valid_write_ids() + result = receive_message(Get_valid_write_ids_result) + return result.success unless result.success.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_valid_write_ids failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -4396,6 +4411,13 @@ def process_heartbeat_write_id(seqid, iprot, oprot) write_result(result, oprot, 'heartbeat_write_id', seqid) end + def process_get_valid_write_ids(seqid, iprot, oprot) + args = read_args(iprot, Get_valid_write_ids_args) + result = Get_valid_write_ids_result.new() + result.success = @handler.get_valid_write_ids(args.req) + write_result(result, oprot, 'get_valid_write_ids', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -10092,5 +10114,37 @@ def validate ::Thrift::Struct.generate_accessors self end + class Get_valid_write_ids_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetValidWriteIdsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_valid_write_ids_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::GetValidWriteIdsResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index f99bcd2e1bbb..e1d41c4191a4 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -119,6 +119,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.sql.SQLException; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.AbstractMap; @@ -134,6 +135,7 @@ import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.Set; import java.util.Timer; import java.util.concurrent.Callable; @@ -438,19 +440,19 @@ public void init() throws MetaException { updateMetrics(); LOG.info("Finished metadata count metrics: " + initDatabaseCount + " databases, " + initTableCount + " tables, " + initPartCount + " partitions."); - metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable<Object>() { @Override public Object getValue() { return initDatabaseCount; } }); - metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable<Object>() { @Override public Object getValue() { return initTableCount; } }); - metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable<Object>() { @Override public Object getValue() { return initPartCount; @@ -1264,26 +1266,6 @@ private boolean is_type_exists(RawStore ms, String typeName) return (ms.getType(typeName) != null); } - private void drop_type_core(final RawStore ms, String typeName) - throws NoSuchObjectException, MetaException { - boolean success = false; - try { - ms.openTransaction(); - // drop any partitions - if (!is_type_exists(ms, typeName)) { - throw new NoSuchObjectException(typeName + " doesn't exist"); - } - if (!ms.dropType(typeName)) { - throw new MetaException("Unable to drop 
type " + typeName); - } - success = ms.commitTransaction(); - } finally { - if (!success) { - ms.rollbackTransaction(); - } - } - } - @Override public boolean drop_type(final String name) throws MetaException, NoSuchObjectException { startFunction("drop_type", ": " + name); @@ -1818,7 +1800,7 @@ private List dropPartitionsAndGetLocations(RawStore ms, String dbName, //No drop part listener events fired for public listeners historically, for drop table case. //Limiting to internal listeners for now, to avoid unexpected calls for public listeners. if (listener instanceof HMSMetricsListener) { - for (Partition part : partsToDelete) { + for (@SuppressWarnings("unused") Partition part : partsToDelete) { listener.onDropPartition(null); } } @@ -2294,7 +2276,7 @@ private List add_partitions_core(final RawStore ms, } - partFutures.add(threadPool.submit(new Callable() { + partFutures.add(threadPool.submit(new Callable() { @Override public Partition call() throws Exception { boolean madeDir = createLocationForAddedPartition(table, part); @@ -2456,8 +2438,8 @@ private int add_partitions_pspec_core( LOG.info("Not adding partition " + part + " as it already exists"); continue; } - partFutures.add(threadPool.submit(new Callable() { - @Override public Object call() throws Exception { + partFutures.add(threadPool.submit(new Callable() { + @Override public Partition call() throws Exception { boolean madeDir = createLocationForAddedPartition(table, part); if (addedPartitions.put(new PartValEqWrapperLite(part), madeDir) != null) { // Technically, for ifNotExists case, we could insert one and discard the other @@ -2474,7 +2456,7 @@ private int add_partitions_pspec_core( try { for (Future partFuture : partFutures) { - Partition part = partFuture.get(); + partFuture.get(); } } catch (InterruptedException | ExecutionException e) { // cancel other tasks @@ -3777,6 +3759,7 @@ public List get_fields_with_environment_context(String db, String t } } + @SuppressWarnings("deprecation") Deserializer s = MetaStoreUtils.getDeserializer(curConf, tbl, false); ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s); } catch (SerDeException e) { @@ -5745,7 +5728,7 @@ public boolean partition_name_has_valid_characters(List part_vals, throw newMetaException(e); } } - endFunction("partition_name_has_valid_characters", true, null); + endFunction("partition_name_has_valid_characters", true, ex); return ret; } @@ -6044,21 +6027,6 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( return new GetRoleGrantsForPrincipalResponse(roleMaps); } - /** - * Convert each MRoleMap object into a thrift RolePrincipalGrant object - * @param roles - * @return - */ - private List getRolePrincipalGrants(List roles) throws MetaException { - List rolePrinGrantList = new ArrayList(); - if (roles != null) { - for (Role role : roles) { - rolePrinGrantList.addAll(getMS().listRoleMembers(role.getRoleName())); - } - } - return rolePrinGrantList; - } - @Override public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, TException { @@ -6448,31 +6416,47 @@ private void throwMetaException(Exception e) throws MetaException, } } + private final Random random = new Random(); @Override public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws TException { RawStore ms = getMS(); String dbName = req.getDbName(), tblName = req.getTblName(); startFunction("get_next_write_id", " : db=" + dbName + " tbl=" + tblName); - Exception ex = null; + Exception exception = null; 
long writeId = -1; - // TODO# see TXN about how to handle conflicts try { - boolean ok = false; - ms.openTransaction(); - try { - Table tbl = ms.getTable(dbName, tblName); - if (tbl == null) { - throw new NoSuchObjectException(dbName + "." + tblName); + int deadlockTryCount = 10; + int deadlockRetryBackoffMs = 200; + while (deadlockTryCount-- > 0) { + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new NoSuchObjectException(dbName + "." + tblName); + } + writeId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0; + tbl.setMmNextWriteId(writeId + 1); + ms.alterTable(dbName, tblName, tbl); + ok = true; + } finally { + if (!ok) { + ms.rollbackTransaction(); + // Exception should propagate; don't override it by breaking out of the loop. + } else { + Boolean commitResult = ms.commitTransactionExpectDeadlock(); + if (commitResult != null) { + if (commitResult) break; // Assume no exception; ok to break out of the loop. + throw new MetaException("Failed to commit"); + } + } } - writeId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0; - tbl.setMmNextWriteId(writeId + 1); - ms.alterTable(dbName, tblName, tbl); - ok = true; - } finally { - commitOrRollback(ms, ok); + LOG.warn("Getting the next write ID failed due to a deadlock; retrying"); + Thread.sleep(random.nextInt(deadlockRetryBackoffMs)); } + // Do a separate txn after we have reserved the number. TODO: If we fail, ignore on read. - ok = false; + boolean ok = false; ms.openTransaction(); try { Table tbl = ms.getTable(dbName, tblName); @@ -6482,10 +6466,10 @@ public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws commitOrRollback(ms, ok); } } catch (Exception e) { - ex = e; + exception = e; throwMetaException(e); } finally { - endFunction("get_next_write_id", ex == null, ex, tblName); + endFunction("get_next_write_id", exception == null, exception, tblName); } return new GetNextWriteIdResult(writeId); } @@ -6562,10 +6546,65 @@ private MTableWrite getActiveTableWrite(RawStore ms, String dbName, assert tw.getState().length() == 1; char state = tw.getState().charAt(0); if (state != MM_WRITE_OPEN) { - throw new MetaException("Invalid write state to finalize: " + state); + throw new MetaException("Invalid write state: " + state); } return tw; } + + @Override + public GetValidWriteIdsResult get_valid_write_ids( + GetValidWriteIdsRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + startFunction("get_valid_write_ids", " : db=" + dbName + " tbl=" + tblName); + GetValidWriteIdsResult result = new GetValidWriteIdsResult(); + Exception ex = null; + try { + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new InvalidObjectException(dbName + "." + tblName); + } + long nextId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0; + long watermarkId = tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1; + if (nextId > (watermarkId + 1)) { + // There may be some intermediate failed or active writes; get the valid ones. 
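+ // Worked example: with watermarkId = -1, nextId = 4, and committed ids {0, 1, 3},
+ // the loop below advances the watermark over the contiguous committed prefix {0, 1}
+ // to 1, trims the list to {3}, and returns lowWatermark = 1, highWatermark = 4 with
+ // areIdsValid = true; write 2 (failed or still open) remains invisible to readers.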
+ List<Long> ids = ms.getWriteIds( + dbName, tblName, watermarkId, nextId, MM_WRITE_COMMITTED); + // TODO: we could optimize here and send the smaller of the lists, and also use ranges + if (ids != null) { + Iterator<Long> iter = ids.iterator(); + long oldWatermarkId = watermarkId; + while (iter.hasNext()) { + if (iter.next() != watermarkId + 1) break; + ++watermarkId; + } + long removed = watermarkId - oldWatermarkId; + if (removed > 0) { + ids = ids.subList((int)removed, ids.size()); + } + if (!ids.isEmpty()) { + result.setIds(ids); + result.setAreIdsValid(true); + } + } + } + result.setHighWatermarkId(nextId); + result.setLowWatermarkId(watermarkId); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("get_valid_write_ids", ex == null, ex, tblName); + } + return result; + } } @@ -7053,7 +7092,7 @@ private static void startCompactorCleaner(HiveConf conf) throws Exception { } private static MetaStoreThread instantiateThread(String classname) throws Exception { - Class c = Class.forName(classname); + Class<?> c = Class.forName(classname); Object o = c.newInstance(); if (MetaStoreThread.class.isAssignableFrom(o.getClass())) { return (MetaStoreThread)o; @@ -7082,7 +7121,7 @@ private static void startHouseKeeperService(HiveConf conf) throws Exception { startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService")); startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidWriteSetService")); } - private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception { + private static void startHouseKeeperService(HiveConf conf, Class<?> c) throws Exception { //todo: when metastore adds orderly-shutdown logic, houseKeeper.stop() //should be called from it HouseKeeperService houseKeeper = (HouseKeeperService)c.newInstance(); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 6bd6d92bce37..0325854e7cea 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -2404,4 +2404,10 @@ public void heartbeatTableWrite( String dbName, String tableName, long writeId) throws TException { client.heartbeat_write_id(new HeartbeatWriteIdRequest(dbName, tableName, writeId)); } + + @Override + public GetValidWriteIdsResult getValidWriteIds( + String dbName, String tableName) throws TException { + return client.get_valid_write_ids(new GetValidWriteIdsRequest(dbName, tableName)); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index f5d611d8d929..870631264d2c 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import 
org.apache.hadoop.hive.metastore.api.HiveObjectRef; @@ -1626,4 +1627,6 @@ void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws void finalizeTableWrite(String dbName, String tableName, long writeId, boolean commit) throws TException; + + GetValidWriteIdsResult getValidWriteIds(String dbName, String tableName) throws TException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 561f3e3708a0..125a3e51209a 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -113,15 +113,8 @@ class MetaStoreDirectSql { private final boolean isAggregateStatsCacheEnabled; private AggregateStatsCache aggrStatsCache; - public MetaStoreDirectSql(PersistenceManager pm, Configuration conf) { + public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, DatabaseProduct dbType) { this.pm = pm; - DatabaseProduct dbType = null; - try { - dbType = DatabaseProduct.determineDatabaseProduct(getProductName()); - } catch (SQLException e) { - LOG.warn("Cannot determine database product; assuming OTHER", e); - dbType = DatabaseProduct.OTHER; - } this.dbType = dbType; int batchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE); if (batchSize == DETECT_BATCHING) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 9dc80b1c97cb..fb3b1ada9bba 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -25,6 +25,8 @@ import java.net.InetAddress; import java.net.URI; import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -52,6 +54,7 @@ import javax.jdo.Query; import javax.jdo.Transaction; import javax.jdo.datastore.DataStoreCache; +import javax.jdo.datastore.JDOConnection; import javax.jdo.identity.IntIdentity; import com.google.common.collect.Maps; @@ -220,6 +223,7 @@ private static enum TXN_STATUS { private boolean isInitialized = false; private PersistenceManager pm = null; private MetaStoreDirectSql directSql = null; + private DatabaseProduct dbType = null; private PartitionExpressionProxy expressionProxy = null; private Configuration hiveConf; private volatile int openTrasactionCalls = 0; @@ -329,15 +333,37 @@ private void initialize(Properties dsProps) { pm = getPersistenceManager(); isInitialized = pm != null; if (isInitialized) { + dbType = determineDatabaseProduct(); expressionProxy = createExpressionProxy(hiveConf); if (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)) { - directSql = new MetaStoreDirectSql(pm, hiveConf); + directSql = new MetaStoreDirectSql(pm, hiveConf, dbType); } } LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + " created in the thread with id: " + Thread.currentThread().getId()); } + private DatabaseProduct determineDatabaseProduct() { + try { + return DatabaseProduct.determineDatabaseProduct(getProductName(pm)); + } catch (SQLException e) { + LOG.warn("Cannot determine database product; assuming OTHER", e); + return DatabaseProduct.OTHER; + } + } + + private static String getProductName(PersistenceManager pm) { + JDOConnection jdoConn = pm.getDataStoreConnection(); 
+ try { + return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName(); + } catch (Throwable t) { + LOG.warn("Error retrieving product name", t); + return null; + } finally { + jdoConn.close(); // We must release the connection before we call other pm methods. + } + } + /** * Creates the proxy used to evaluate expressions. This is here to prevent circular * dependency - ql -> metastore client <-> metastore server -> ql. If server and @@ -511,15 +537,52 @@ public boolean openTransaction() { return result; } - /** - * if this is the commit of the first open call then an actual commit is - * called. - * - * @return Always returns true - */ @Override @SuppressWarnings("nls") public boolean commitTransaction() { + if (!startCommitTransaction()) return false; + + openTrasactionCalls--; + debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive()); + if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { + transactionStatus = TXN_STATUS.COMMITED; + currentTransaction.commit(); + } + + return true; + } + + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + if (!startCommitTransaction()) return false; + + if (--openTrasactionCalls != 0) { + String msg = "commitTransactionExpectDeadlock cannot be called for a nested transaction"; + LOG.error(msg); + throw new AssertionError(msg); + } + + transactionStatus = TXN_STATUS.COMMITED; + try { + currentTransaction.commit(); + } catch (Exception ex) { + Throwable candidate = ex; + while (candidate != null && !(candidate instanceof SQLException)) { + candidate = candidate.getCause(); + } + if (candidate == null) throw ex; + if (DatabaseProduct.isDeadlock(dbType, (SQLException)candidate)) { + LOG.info("Deadlock exception during commit: " + candidate.getMessage()); + return null; + } + throw ex; + } + + return true; + } + + private boolean startCommitTransaction() { if (TXN_STATUS.ROLLBACK == transactionStatus) { debugLog("Commit transaction: rollback"); return false; @@ -538,14 +601,6 @@ public boolean commitTransaction() { LOG.error("Unbalanced calls to open/commit Transaction", e); throw e; } - openTrasactionCalls--; - debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive()); - - if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { - transactionStatus = TXN_STATUS.COMMITED; - currentTransaction.commit(); - } - return true; } @@ -1487,7 +1542,7 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), tbl.getViewOriginalText(), tbl.getViewExpandedText(), - tableType, tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : -1, + tableType, tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0, tbl.isSetMmWatermarkWriteId() ? 
tbl.getMmWatermarkWriteId() : -1); } @@ -2718,7 +2773,8 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL) && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); if (isConfigEnabled && directSql == null) { - directSql = new MetaStoreDirectSql(pm, getConf()); + dbType = determineDatabaseProduct(); + directSql = new MetaStoreDirectSql(pm, getConf(), dbType); } if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { @@ -8692,16 +8748,10 @@ public MTableWrite getTableWrite( Query query = null; try { openTransaction(); - dbName = HiveStringUtils.normalizeIdentifier(dbName); - tblName = HiveStringUtils.normalizeIdentifier(tblName); - MTable mtbl = getMTable(dbName, tblName); - if (mtbl == null) { - success = true; - return null; - } query = pm.newQuery(MTableWrite.class, "table.tableName == t1 && table.database.name == t2 && writeId == t3"); query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3"); + @SuppressWarnings("unchecked") List<MTableWrite> writes = (List<MTableWrite>) query.execute(tblName, dbName, writeId); pm.retrieveAll(writes); success = true; @@ -8723,4 +8773,34 @@ public MTableWrite getTableWrite( } } + @Override + public List<Long> getWriteIds(String dbName, String tblName, + long watermarkId, long nextWriteId, char state) throws MetaException { + boolean success = false; + Query query = null; + try { + openTransaction(); + query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite" + + " where table.tableName == t1 && table.database.name == t2 && writeId >= t3" + + " && writeId < t4 && state == t5"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3, " + + "java.lang.Long t4, java.lang.String t5"); + query.setResult("writeId"); + query.setOrdering("writeId asc"); + @SuppressWarnings("unchecked") + List<Long> writes = (List<Long>) query.executeWithArray( + tblName, dbName, watermarkId, nextWriteId, String.valueOf(state)); + success = true; + return (writes == null || writes.isEmpty()) ? null : new ArrayList<>(writes); + } finally { + if (success) { + commitTransaction(); + } else { + rollbackTransaction(); + } + if (query != null) { + query.closeAll(); + } + } + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index c5359cf1c395..170c07d4c4c9 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -93,6 +93,15 @@ public interface RawStore extends Configurable { @CanNotRetry public abstract boolean commitTransaction(); + /** + * Commits the transaction and detects whether a failure to commit was caused by a deadlock. + * Must be called on the top level with regard to openTransaction calls; attempting to + * call this after several nested openTransaction calls will throw. + * @return true or false - same as commitTransaction; null in case of deadlock. 
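+ * A minimal usage sketch (mirrors the retry loop in HiveMetaStore.get_next_write_id):
+ * <pre>
+ * Boolean committed = ms.commitTransactionExpectDeadlock();
+ * if (committed == null) {
+ *   // deadlock detected; back off briefly and retry the whole transaction
+ * } else if (!committed) {
+ *   throw new MetaException("Failed to commit");
+ * }
+ * </pre>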
+ */ + @CanNotRetry + public abstract Boolean commitTransactionExpectDeadlock(); + /** * Rolls back the current transaction if it is active */ @@ -687,4 +696,6 @@ void createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys, MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException; void createTableWrite(Table tbl, long writeId, char state, long heartbeat); + + List<Long> getWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index 4fbeb9ed6e05..829f0aeda699 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy; import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.RawStore.CanNotRetry; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -131,12 +132,26 @@ public boolean openTransaction() { @Override public boolean commitTransaction() { if (--txnNestLevel == 0) { - LOG.debug("Committing HBase transaction"); - getHBase().commit(); + commitInternal(); } return true; } + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + if (--txnNestLevel != 0) { + throw new AssertionError("Cannot be called on a nested transaction"); + } + commitInternal(); + return true; + } + + private void commitInternal() { + LOG.debug("Committing HBase transaction"); + getHBase().commit(); + } + @Override public void rollbackTransaction() { txnNestLevel = 0; @@ -2741,4 +2756,12 @@ public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { // TODO: Auto-generated method stub throw new UnsupportedOperationException(); } + + + @Override + public List<Long> getWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } } diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo index 512655622b96..bd71056faea8 100644 --- a/metastore/src/model/package.jdo +++ b/metastore/src/model/package.jdo [seven one-line XML hunks at @@ -53,7 +53,7 @@, @@ -183,10 +183,10 @@, @@ -210,7 +210,7 @@, @@ -219,7 +219,7 @@, @@ -288,7 +288,7 @@, @@ -308,7 +308,7 @@ and @@ -1003,7 +1003,7 @@: the angle-bracketed element content was lost in extraction and is not recoverable] diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 9fffd3ff655b..98c543fe988f 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -878,4 +878,16 @@ public void updateTableWrite(MTableWrite tw) { public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { return null; } + + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + return null; + } + + @Override + public List<Long> getWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + return null; + } } diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a763085436b4..8e54b1629304 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -104,14 +104,17 @@ public boolean openTransaction() { @Override public boolean commitTransaction() { + return false; + } + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { return false; } @Override public void rollbackTransaction() { - - } @Override @@ -893,6 +896,12 @@ public void updateTableWrite(MTableWrite tw) { public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { return null; } + + @Override + public List getWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + return null; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 42d398dcc9a3..45a80e3497ef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -39,6 +39,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.common.metrics.common.Metrics; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -46,8 +47,11 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveVariableSource; import org.apache.hadoop.hive.conf.VariableSubstitution; +import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.ExplainTask; @@ -71,6 +75,7 @@ import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; @@ -1416,6 +1421,11 @@ else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) { return rollback(createProcessorResponse(ret)); } } + try { + acquireWriteIds(plan, conf); + } catch (HiveException e) { + return handleHiveException(e, 1); + } ret = execute(); if (ret != 0) { //if needRequireLock is false, the release here will do nothing because there is no lock @@ -1458,6 +1468,34 @@ else if(plan.getOperation() == HiveOperation.ROLLBACK) { return createProcessorResponse(ret); } + private static void acquireWriteIds(QueryPlan plan, HiveConf conf) throws HiveException { + // Output IDs are put directly into FileSinkDesc; here, we only need to take care of inputs. 
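+    // For each MM table the query reads, fetch the committed write ID list from
+    // the metastore and record it in the conf (and in the fetch task's conf, if
+    // any), so that readers can later skip uncommitted mm_ directories; see the
+    // ValidWriteIds.createFromConf usage in HiveInputFormat below.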
+ for (ReadEntity input : plan.getInputs()) { + Table t = extractMmTable(input); + if (t == null) continue; + ValidWriteIds ids = Hive.get().getValidWriteIdsForTable(t.getDbName(), t.getTableName()); + ids.addToConf(conf, t.getDbName(), t.getTableName()); + if (plan.getFetchTask() != null) { + ids.addToConf(plan.getFetchTask().getFetchConf(), t.getDbName(), t.getTableName()); + } + } + } + + private static Table extractMmTable(ReadEntity input) { + Table t = null; + switch (input.getType()) { + case TABLE: + t = input.getTable(); + break; + case DUMMYPARTITION: + case PARTITION: + t = input.getPartition().getTable(); + break; + default: return null; + } + return (t != null && !t.isTemporary() && AcidUtils.isMmTable(t)) ? t : null; + } + private CommandProcessorResponse rollback(CommandProcessorResponse cpr) { //console.printError(cpr.toString()); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 601ad08331d5..7375cd453fef 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -30,6 +30,7 @@ import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -695,4 +696,8 @@ public RecordReader getRecordReader(JobConf job) t return inputFormat.getRecordReader(getInputSplit(), job, Reporter.NULL); } } + + public Configuration getJobConf() { + return job; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java index 8c7d99d07126..93c03fd0e6f3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java @@ -24,6 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.DriverContext; @@ -193,4 +194,8 @@ public void clearFetch() throws HiveException { } } + public Configuration getFetchConf() { + return fetch.getJobConf(); + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 6a0143aa5050..e4e0153dc0a9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -40,10 +40,10 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -239,7 +239,8 @@ private void commit(FileSystem fs) throws HiveException { } } if (isMmTable) { - Path manifestPath = new Path(specPath, "_tmp." + getMmPrefixedTaskId() + MANIFEST_EXTENSION); + Path manifestPath = new Path(specPath, "_tmp." 
+ ValidWriteIds.getMmFilePrefix( + conf.getMmWriteId()) + "_" + taskId + MANIFEST_EXTENSION); Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); try { try (FSDataOutputStream out = fs.create(manifestPath)) { @@ -323,11 +324,12 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT } outPaths[filesIdx] = getTaskOutPath(taskId); } else { + String subdirPath = ValidWriteIds.getMmFilePrefix(conf.getMmWriteId()) + "/" + taskId; if (!bDynParts && !isSkewedStoredAsSubDirectories) { - finalPaths[filesIdx] = getFinalPath(getMmPrefixedTaskId(), specPath, extension); + finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension); } else { - // TODO# wrong! - finalPaths[filesIdx] = getFinalPath(getMmPrefixedTaskId(), specPath, extension); + // TODO# wrong! special case #N bucketing + finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension); } outPaths[filesIdx] = finalPaths[filesIdx]; } @@ -721,10 +723,6 @@ protected boolean updateProgress() { } } - private String getMmPrefixedTaskId() { - return AcidUtils.getMmFilePrefix(conf.getMmWriteId()) + taskId; - } - protected Writable recordValue; @@ -1195,21 +1193,6 @@ public void jobCloseOp(Configuration hconf, boolean success) super.jobCloseOp(hconf, success); } - private static class ExecPrefixPathFilter implements PathFilter { - private final String prefix, tmpPrefix; - public ExecPrefixPathFilter(String prefix) { - this.prefix = prefix; - this.tmpPrefix = "_tmp." + prefix; - } - - @Override - public boolean accept(Path path) { - String name = path.getName(); - return name.startsWith(prefix) || name.startsWith(tmpPrefix); - } - } - - private void handleMmTable(Path specPath, Configuration hconf, boolean success, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter) throws IOException, HiveException { @@ -1217,7 +1200,7 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success, int targetLevel = (dpCtx == null) ? 
1 : dpCtx.getNumDPCols();
     if (!success) {
       FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
-          new ExecPrefixPathFilter(AcidUtils.getMmFilePrefix(conf.getMmWriteId())));
+          new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true));
       for (FileStatus status : statuses) {
         Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
         tryDelete(fs, status.getPath());
@@ -1225,15 +1208,19 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
       return;
     }
     FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
-        new ExecPrefixPathFilter(AcidUtils.getMmFilePrefix(conf.getMmWriteId())));
+        new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true));
     if (statuses == null) return;
     LinkedList<FileStatus> results = new LinkedList<>();
     List<Path> manifests = new ArrayList<>(statuses.length);
     for (FileStatus status : statuses) {
       if (status.getPath().getName().endsWith(MANIFEST_EXTENSION)) {
         manifests.add(status.getPath());
+      } else if (!status.isDirectory()) {
+        Path path = status.getPath();
+        Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
+        tryDelete(fs, path);
       } else {
-        results.add(status);
+        results.addAll(Lists.newArrayList(fs.listStatus(status.getPath())));
       }
     }
     HashSet<String> committed = new HashSet<>();
@@ -1254,7 +1241,10 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
       if (!committed.remove(rfs.getPath().toString())) {
         iter.remove();
         Utilities.LOG14535.info("Deleting " + rfs.getPath() + " that was not committed");
-        tryDelete(fs, rfs.getPath());
+        // We should actually succeed here - if we fail, don't commit the query.
+        if (!fs.delete(rfs.getPath(), true)) {
+          throw new HiveException("Failed to delete an uncommitted path " + rfs.getPath());
+        }
       }
     }
     if (!committed.isEmpty()) {
@@ -1268,6 +1258,7 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
     if (results.isEmpty()) return;
     FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]);
+    // TODO# dp will break - removeTempOrDuplicateFiles assumes dirs in results. Why? We recurse...
     List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
         fs, finalResults, dpCtx, conf, hconf);
     // create empty buckets if necessary
@@ -1278,7 +1269,7 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
   private void tryDelete(FileSystem fs, Path path) {
     try {
-      fs.delete(path, false);
+      fs.delete(path, true);
     } catch (IOException ex) {
       LOG.error("Failed to delete " + path, ex);
     }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index f2389eaf447f..3be21c469d73 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -314,17 +314,9 @@ public int execute(DriverContext driverContext) {
       checkFileFormats(db, tbd, table);

       boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID;
-      if (tbd.isMmTable()) {
-        if (tbd.getReplace()) {
-          // TODO#: would need a list of new files to support. Then, old ones only would need
-          //        to be removed from MS (and FS). Also, per-partition IOW is problematic for
-          //        the prefix case.
-          throw new HiveException("Replace and MM are not supported");
-        }
-        if (isAcid) {
-          // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move.
- throw new HiveException("ACID and MM are not supported"); - } + if (tbd.isMmTable() && isAcid) { + // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move. + throw new HiveException("ACID and MM are not supported"); } // Create a data container diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 9e6a2011bfdf..03abdc186403 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -123,6 +123,7 @@ import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.OneNullRowInputFormat; import org.apache.hadoop.hive.ql.io.RCFile; import org.apache.hadoop.hive.ql.io.ReworkMapredInputFormat; @@ -161,6 +162,7 @@ import org.apache.hadoop.hive.ql.stats.StatsPublisher; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; +import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.Serializer; @@ -192,6 +194,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Shell; import org.apache.hive.common.util.ReflectionUtil; @@ -199,6 +202,7 @@ import org.slf4j.LoggerFactory; import com.esotericsoftware.kryo.Kryo; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** @@ -651,6 +655,7 @@ protected void initialize(Class type, Object oldInstance, Object newInstance, En } } + @VisibleForTesting public static TableDesc defaultTd; static { // by default we expect ^A separated strings @@ -658,7 +663,16 @@ protected void initialize(Class type, Object oldInstance, Object newInstance, En // PlanUtils.getDefaultTableDesc(String separatorCode, String columns) // or getBinarySortableTableDesc(List fieldSchemas) when // we know the column names. - defaultTd = PlanUtils.getDefaultTableDesc("" + Utilities.ctrlaCode); + /** + * Generate the table descriptor of MetadataTypedColumnsetSerDe with the + * separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe + * does not support a table with a single column "col" with type + * "array". 
+   */
+  defaultTd = new TableDesc(TextInputFormat.class, IgnoreKeyTextOutputFormat.class,
+      Utilities.makeProperties(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,
+      "" + Utilities.ctrlaCode, serdeConstants.SERIALIZATION_LIB,
+      MetadataTypedColumnsetSerDe.class.getName()));
   }

   public static final int carriageReturnCode = 13;
@@ -1528,14 +1542,9 @@ public static List<Path> removeTempOrDuplicateFiles(FileSystem fs, FileStatus[]
           // get the missing buckets and generate empty buckets
           String taskID1 = taskIDToFile.keySet().iterator().next();
           Path bucketPath = taskIDToFile.values().iterator().next().getPath();
+          Utilities.LOG14535.info("Bucket path " + bucketPath);
           for (int j = 0; j < dpCtx.getNumBuckets(); ++j) {
-            String taskID2 = replaceTaskId(taskID1, j);
-            if (!taskIDToFile.containsKey(taskID2)) {
-              // create empty bucket, file name should be derived from taskID2
-              URI bucketUri = bucketPath.toUri();
-              String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
-              result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
-            }
+            addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j);
           }
         }
       }
@@ -1550,14 +1559,9 @@ public static List<Path> removeTempOrDuplicateFiles(FileSystem fs, FileStatus[]
         // get the missing buckets and generate empty buckets for non-dynamic partition
         String taskID1 = taskIDToFile.keySet().iterator().next();
         Path bucketPath = taskIDToFile.values().iterator().next().getPath();
+        Utilities.LOG14535.info("Bucket path " + bucketPath);
         for (int j = 0; j < conf.getTable().getNumBuckets(); ++j) {
-          String taskID2 = replaceTaskId(taskID1, j);
-          if (!taskIDToFile.containsKey(taskID2)) {
-            // create empty bucket, file name should be derived from taskID2
-            URI bucketUri = bucketPath.toUri();
-            String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
-            result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
-          }
+          addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j);
         }
       }
     }
@@ -1565,6 +1569,19 @@
     return result;
   }

+  private static void addBucketFileIfMissing(List<Path> result,
+      HashMap<String, FileStatus> taskIDToFile, String taskID1, Path bucketPath, int j) {
+    // TODO# this will probably break with directories cause buckets would be above (or not?)
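+    // Worked example (hypothetical names): for taskID1 = "000001" and j = 3,
+    // replaceTaskId returns "000003", and replaceTaskIdFromFilename below swaps that
+    // ID into bucketPath's file name to derive the name of the empty bucket file.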
+    String taskID2 = replaceTaskId(taskID1, j);
+    if (!taskIDToFile.containsKey(taskID2)) {
+      // create empty bucket, file name should be derived from taskID2
+      URI bucketUri = bucketPath.toUri();
+      String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j);
+      Utilities.LOG14535.info("Creating an empty bucket file " + path2);
+      result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2));
+    }
+  }
+
   public static HashMap<String, FileStatus> removeTempOrDuplicateFiles(FileStatus[] items,
       FileSystem fs) throws IOException {
@@ -2976,8 +2993,9 @@ public static List<Path> getInputPaths(JobConf job, MapWork work, Path hiveScrat
     // The alias may not have any path
     Path path = null;
-    for (Path file : new LinkedList<Path>(work.getPathToAliases().keySet())) {
-      List<String> aliases = work.getPathToAliases().get(file);
+    for (Map.Entry<Path, ArrayList<String>> e : work.getPathToAliases().entrySet()) {
+      Path file = e.getKey();
+      List<String> aliases = e.getValue();
       if (aliases.contains(alias)) {
         path = file;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 1ef15cbf2f3a..70b129e86208 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -1167,8 +1167,4 @@ public static AcidOperationalProperties getAcidOperationalProperties(
     }
     return AcidOperationalProperties.parseString(resultStr);
   }
-
-  public static String getMmFilePrefix(long mmWriteId) {
-    return "mm_" + mmWriteId + "_";
-  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index c4b9940c06d2..0510e08ba4a6 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -23,9 +23,11 @@
 import java.io.DataInput;
 import java.io.DataOutput;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -39,8 +41,10 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
+import org.apache.hadoop.hive.common.ValidWriteIds;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
@@ -345,7 +349,10 @@ protected void init(JobConf job) {
    */
   private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, JobConf conf,
       InputFormat inputFormat, Class<? extends InputFormat> inputFormatClass, int splits,
-      TableDesc table, List<InputSplit> result) throws IOException {
+      TableDesc table, Map<String, ValidWriteIds> writeIdMap, List<InputSplit> result)
+          throws IOException {
+    ValidWriteIds writeIds = extractWriteIds(writeIdMap, conf, table.getTableName());
+    Utilities.LOG14535.info("Observing " + table.getTableName() + ": " + writeIds);

     Utilities.copyTablePropertiesToConf(table, conf);

@@ -353,7 +360,19 @@ private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, Job
       pushFilters(conf, tableScan);
     }

-    FileInputFormat.setInputPaths(conf, dirs.toArray(new Path[dirs.size()]));
+    if (writeIds == null) {
+      FileInputFormat.setInputPaths(conf, dirs.toArray(new Path[dirs.size()]));
+    } else {
+      List<Path> finalPaths = new ArrayList<>(dirs.size());
+      for (Path dir : dirs) {
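+        // Keep only the mm_<writeId>_ subdirectories of dir whose write IDs are
+        // committed according to writeIds; see processForWriteIds below.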
        processForWriteIds(dir, conf, writeIds, finalPaths);
+      }
+      if (finalPaths.isEmpty()) {
+        LOG.warn("No valid inputs found in " + dirs);
+        return;
+      }
+      FileInputFormat.setInputPaths(conf, finalPaths.toArray(new Path[finalPaths.size()]));
+    }
     conf.setInputFormat(inputFormat.getClass());

     int headerCount = 0;
@@ -373,6 +392,24 @@ private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, Job
     }
   }

+  private void processForWriteIds(Path dir, JobConf conf,
+      ValidWriteIds writeIds, List<Path> finalPaths) throws IOException {
+    FileStatus[] files = dir.getFileSystem(conf).listStatus(dir); // TODO: batch?
+    for (FileStatus file : files) {
+      Path subdir = file.getPath();
+      if (!file.isDirectory()) {
+        Utilities.LOG14535.warn("Found a file not in subdirectory " + subdir);
+        continue;
+      }
+      if (!writeIds.isValidInput(subdir)) {
+        Utilities.LOG14535.warn("Ignoring an uncommitted directory " + subdir);
+        continue;
+      }
+      Utilities.LOG14535.info("Adding input " + subdir);
+      finalPaths.add(subdir);
+    }
+  }
+
   Path[] getInputPaths(JobConf job) throws IOException {
     Path[] dirs;
     if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
@@ -416,6 +453,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     StringBuilder readColumnNamesBuffer = new StringBuilder(newjob.
         get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, ""));
     // for each dir, get the InputFormat, and do getSplits.
+    Map<String, ValidWriteIds> writeIdMap = new HashMap<>();
     for (Path dir : dirs) {
       PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir);
       Class inputFormatClass = part.getInputFileFormatClass();
@@ -466,7 +504,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
         addSplitsForGroup(currentDirs, currentTableScan, newjob,
             getInputFormatFromCache(currentInputFormatClass, job),
             currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length),
-            currentTable, result);
+            currentTable, writeIdMap, result);
       }

       currentDirs.clear();
@@ -488,7 +526,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
       addSplitsForGroup(currentDirs, currentTableScan, newjob,
           getInputFormatFromCache(currentInputFormatClass, job),
           currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length),
-          currentTable, result);
+          currentTable, writeIdMap, result);
     }

     Utilities.clearWorkMapForConf(job);
@@ -499,6 +537,19 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     return result.toArray(new HiveInputSplit[result.size()]);
   }

+  private static ValidWriteIds extractWriteIds(Map<String, ValidWriteIds> writeIdMap,
+      JobConf newjob, String tableName) {
+    if (StringUtils.isBlank(tableName)) return null;
+    ValidWriteIds writeIds = writeIdMap.get(tableName);
+    if (writeIds == null) {
+      writeIds = ValidWriteIds.createFromConf(newjob, tableName);
+      writeIdMap.put(tableName, writeIds != null ?
writeIds : ValidWriteIds.NO_WRITE_IDS); + } else if (writeIds == ValidWriteIds.NO_WRITE_IDS) { + writeIds = null; + } + return writeIds; + } + private void pushProjection(final JobConf newjob, final StringBuilder readColumnsBuffer, final StringBuilder readColumnNamesBuffer) { String readColIds = readColumnsBuffer.toString(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 2ba4fa2953a4..f3609df7f8c2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -67,6 +67,7 @@ import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; import org.apache.hadoop.hive.conf.HiveConf; @@ -96,6 +97,7 @@ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; @@ -1563,6 +1565,11 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par if (areEventsForDmlNeeded(tbl, oldPart)) { newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } + if (replace) { + Path tableDest = tbl.getPath(); + deleteOldPathForReplace(newPartPath, oldPartPath, + getConf(), new ValidWriteIds.IdPathFilter(mmWriteId, false)); + } } else { if (replace || (oldPart == null && !isAcid)) { replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), @@ -1652,7 +1659,7 @@ private boolean areEventsForDmlNeeded(Table tbl, Partition oldPart) { private List listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws HiveException { List newFiles = new ArrayList(); - final String filePrefix = AcidUtils.getMmFilePrefix(mmWriteId); + final String filePrefix = ValidWriteIds.getMmFilePrefix(mmWriteId); FileStatus[] srcs; FileSystem srcFs; try { @@ -1920,7 +1927,7 @@ public Void call() throws Exception { for (Future future : futures) { future.get(); } - // TODO# we would commit the txn to metastore here + // TODO# special case #N - DP - we would commit the txn to metastore here } catch (InterruptedException | ExecutionException e) { LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); //cancel other futures @@ -1993,6 +2000,11 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean } } } else { + if (replace) { + Path tableDest = tbl.getPath(); + deleteOldPathForReplace(tableDest, tableDest, sessionConf, + new ValidWriteIds.IdPathFilter(mmWriteId, false)); + } newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { @@ -3376,39 +3388,10 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, } if (oldPath != null) { - boolean oldPathDeleted = false; - boolean isOldPathUnderDestf = false; - FileStatus[] statuses = null; - try { - FileSystem oldFs = oldPath.getFileSystem(conf); - statuses = 
oldFs.listStatus(oldPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
-        // Do not delete oldPath if:
-        //  - destf is subdir of oldPath
-        isOldPathUnderDestf = isSubDir(oldPath, destf, oldFs, destFs, false);
-        if (isOldPathUnderDestf) {
-          // if oldPath is destf or its subdir, its should definitely be deleted, otherwise its
-          // existing content might result in incorrect (extra) data.
-          // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is
-          // not the destf or its subdir?
-          oldPathDeleted = trashFiles(oldFs, statuses, conf);
-        }
-      } catch (IOException e) {
-        if (isOldPathUnderDestf) {
-          // if oldPath is a subdir of destf but it could not be cleaned
-          throw new HiveException("Directory " + oldPath.toString()
-              + " could not be cleaned up.", e);
-        } else {
-          //swallow the exception since it won't affect the final result
-          LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e);
-        }
-      }
-      if (statuses != null && statuses.length > 0) {
-        if (isOldPathUnderDestf && !oldPathDeleted) {
-          throw new HiveException("Destination directory " + destf + " has not be cleaned up.");
-        }
-      }
+      deleteOldPathForReplace(destf, oldPath, conf, FileUtils.HIDDEN_FILES_PATH_FILTER);
     }

+    // TODO# what are the paths that use this? MM tables will need to do this beforehand
     // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
     // destf with inherited permissions
     boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars
@@ -3442,6 +3425,37 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath,
   }

+  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
+      PathFilter pathFilter) throws HiveException {
+    boolean isOldPathUnderDestf = false;
+    try {
+      FileSystem oldFs = oldPath.getFileSystem(conf);
+      FileSystem destFs = destPath.getFileSystem(conf);
+      // if oldPath is destf or its subdir, it should definitely be deleted, otherwise its
+      // existing content might result in incorrect (extra) data.
+      // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is
+      // not the destf or its subdir?
+      isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false);
+      if (isOldPathUnderDestf) {
+        FileStatus[] statuses = oldFs.listStatus(oldPath, pathFilter);
+        if (statuses != null && statuses.length > 0 && !trashFiles(oldFs, statuses, conf)) {
+          throw new HiveException("Destination directory " + destPath
+              + " has not been cleaned up.");
+        }
+      }
+    } catch (IOException e) {
+      if (isOldPathUnderDestf) {
+        // if oldPath is a subdir of destf but it could not be cleaned
+        throw new HiveException("Directory " + oldPath.toString()
+            + " could not be cleaned up.", e);
+      } else {
+        //swallow the exception since it won't affect the final result
+        LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e);
+      }
+    }
+  }
+
+
   /**
    * Trashes or deletes all files under a directory. Leaves the directory as is.
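   * A usage sketch (for illustration only, mirroring the caller above):
   *   FileStatus[] statuses = fs.listStatus(oldPath, pathFilter);
   *   if (statuses != null && statuses.length > 0 && !trashFiles(fs, statuses, conf)) {
   *     // handle the cleanup failure
   *   }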
* @param fs FileSystem to use @@ -4007,7 +4021,6 @@ public void addForeignKey(List foreignKeyCols) } } - public long getNextTableWriteId(String dbName, String tableName) throws HiveException { try { return getMSC().getNextTableWriteId(dbName, tableName); @@ -4015,4 +4028,17 @@ public long getNextTableWriteId(String dbName, String tableName) throws HiveExce throw new HiveException(e); } } + + public ValidWriteIds getValidWriteIdsForTable( + String dbName, String tableName) throws HiveException { + try { + // TODO: decode ID ranges here if we use that optimization + GetValidWriteIdsResult result = getMSC().getValidWriteIds(dbName, tableName); + return new ValidWriteIds(result.getLowWatermarkId(), result.getHighWatermarkId(), + result.isSetAreIdsValid() && result.isAreIdsValid(), + result.isSetIds() ? new HashSet(result.getIds()) : null); + } catch (Exception e) { + throw new HiveException(e); + } + } }; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index bb7001a68ca5..675bfd028aaa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1808,7 +1808,7 @@ public static Path createMoveTask(Task currTask, boolean // Create the required temporary file in the HDFS location if the destination // path of the FileSinkOperator table is a blobstore path. - // TODO# HERE + // TODO# special case #N - linked FDs (unions?) Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); // Change all the linked file sink descriptors diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java index e2887fdfd7e4..ee674430689d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/AnnotateRunTimeStatsOptimizer.java @@ -71,7 +71,6 @@ public Object dispatch(Node nd, Stack stack, Object... nodeOutputs) Task currTask = (Task) nd; Set> ops = new HashSet<>(); - /* TODO# wtf if (currTask instanceof MapRedTask) { MapRedTask mr = (MapRedTask) currTask; ops.addAll(mr.getWork().getAllOperators()); @@ -85,7 +84,7 @@ public Object dispatch(Node nd, Stack stack, Object... 
nodeOutputs) for (BaseWork w : sparkWork.getAllWork()) { ops.addAll(w.getAllOperators()); } - }*/ + } setOrAnnotateStats(ops, physicalContext.getParseContext()); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 422be8efd3f6..93fe0e9d625a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -206,7 +206,7 @@ public void compile(final ParseContext pCtx, final List tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mvTask.add(tsk); // Check to see if we are stale'ing any indexes and auto-update them if we want diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java index 5cc36635d1e4..1be4d84e95ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.mapred.JobConf; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Interner; /** @@ -375,6 +376,7 @@ public void setNumMapTasks(Integer numMapTasks) { } @SuppressWarnings("nls") + @VisibleForTesting public void addMapWork(Path path, String alias, Operator work, PartitionDesc pd) { ArrayList curAliases = pathToAliases.get(path); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 5dc3aa6f9c89..f055cde199d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -380,20 +380,6 @@ public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, return ret; } - /** - * Generate the table descriptor of MetadataTypedColumnsetSerDe with the - * separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe - * does not support a table with a single column "col" with type - * "array". - */ - public static TableDesc getDefaultTableDesc(String separatorCode) { - return new TableDesc( - TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities - .makeProperties( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,separatorCode, - serdeConstants.SERIALIZATION_LIB,MetadataTypedColumnsetSerDe.class.getName())); - } - /** * Generate the table descriptor for reduce key. 
*/ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java index 1da8e911b606..0a611f979b58 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java @@ -147,8 +147,7 @@ public String getSerdeClassName() { @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { - return properties - .getProperty(hive_metastoreConstants.META_TABLE_NAME); + return properties.getProperty(hive_metastoreConstants.META_TABLE_NAME); } @Explain(displayName = "input format") diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index 8d19df65500a..7c3e13807b1e 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -24,7 +24,8 @@ select * from part_mm; create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); insert into table simple_mm select key from intermediate; -insert into table simple_mm select key from intermediate; +insert overwrite table simple_mm select key from intermediate; + select * from simple_mm; drop table part_mm; diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index f357020d9628..ece6cbffc87a 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -182,13 +182,13 @@ POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: query: insert overwrite table simple_mm select key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 PREHOOK: Output: default@simple_mm -POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: query: insert overwrite table simple_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 @@ -207,10 +207,6 @@ POSTHOOK: Input: default@simple_mm 455 0 455 -0 -455 -0 -455 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm From 70299dc48f93433fb53611b05f8a719b841575c5 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 19 Sep 2016 15:40:19 -0700 Subject: [PATCH 05/24] HIVE-14700 : clean up file/txn information via a metastore thread (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 40 +- .../org/apache/hadoop/hive/conf/HiveConf.java | 21 +- .../hive/common/util/MockFileSystem.java | 622 ++++++++++++++++++ .../upgrade/derby/037-HIVE-14637.derby.sql | 2 +- .../upgrade/derby/hive-schema-2.2.0.derby.sql | 2 +- .../upgrade/mssql/022-HIVE-14637.mssql.sql | 3 +- .../upgrade/mssql/hive-schema-2.2.0.mssql.sql | 3 +- .../upgrade/mysql/037-HIVE-14637.mysql.sql | 3 +- .../upgrade/mysql/hive-schema-2.2.0.mysql.sql | 3 +- .../upgrade/oracle/037-HIVE-14637.oracle.sql | 1 + .../oracle/hive-schema-2.2.0.oracle.sql | 1 + .../postgres/036-HIVE-14637.postgres.sql | 1 + .../postgres/hive-schema-2.2.0.postgres.sql | 1 + 
.../hadoop/hive/metastore/HiveMetaStore.java | 26 +- .../hive/metastore/MetaStoreThread.java | 1 + .../hadoop/hive/metastore/MetaStoreUtils.java | 10 + .../hive/metastore/MmCleanerThread.java | 397 +++++++++++ .../hadoop/hive/metastore/ObjectStore.java | 147 ++++- .../hadoop/hive/metastore/RawStore.java | 26 +- .../hive/metastore/hbase/HBaseStore.java | 29 +- .../hive/metastore/model/MTableWrite.java | 12 +- metastore/src/model/package.jdo | 3 + .../DummyRawStoreControlledCommit.java | 25 +- .../DummyRawStoreForJdoConnection.java | 25 +- .../hive/metastore/TestObjectStore.java | 177 ++++- .../org/apache/hadoop/hive/ql/Driver.java | 3 +- .../apache/hadoop/hive/ql/io/AcidUtils.java | 7 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 2 +- .../hive/ql/parse/SemanticAnalyzer.java | 4 +- .../ql/txn/compactor/CompactorThread.java | 1 - .../hadoop/hive/ql/io/TestAcidUtils.java | 12 +- .../hive/ql/io/orc/TestInputOutputFormat.java | 549 +--------------- 32 files changed, 1527 insertions(+), 632 deletions(-) create mode 100644 common/src/test/org/apache/hive/common/util/MockFileSystem.java create mode 100644 metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java index b25a72d4b30e..b939b43247f8 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -29,7 +29,7 @@ public class ValidWriteIds { public static final ValidWriteIds NO_WRITE_IDS = new ValidWriteIds(-1, -1, false, null); - private static final String MM_PREFIX = "mm"; + public static final String MM_PREFIX = "mm"; private final static Logger LOG = LoggerFactory.getLogger(ValidWriteIds.class); @@ -117,22 +117,8 @@ public boolean isValid(long writeId) { } public boolean isValidInput(Path file) { - String fileName = file.getName(); - String[] parts = fileName.split("_", 3); - if (parts.length < 2 || !MM_PREFIX.equals(parts[0])) { - LOG.info("Ignoring unknown file for a MM table: " + file - + " (" + Arrays.toString(parts) + ")"); - return false; - } - long writeId = -1; - try { - writeId = Long.parseLong(parts[1]); - } catch (NumberFormatException ex) { - LOG.info("Ignoring unknown file for a MM table: " + file - + "; parsing " + parts[1] + " got " + ex.getMessage()); - return false; - } - return isValid(writeId); + Long writeId = extractWriteId(file); + return (writeId != null) && isValid(writeId); } public static String getMmFilePrefix(long mmWriteId) { @@ -155,4 +141,24 @@ public boolean accept(Path path) { return isMatch == (name.startsWith(prefix) || name.startsWith(tmpPrefix)); } } + + + public static Long extractWriteId(Path file) { + String fileName = file.getName(); + String[] parts = fileName.split("_", 3); + if (parts.length < 2 || !MM_PREFIX.equals(parts[0])) { + LOG.info("Cannot extract write ID for a MM table: " + file + + " (" + Arrays.toString(parts) + ")"); + return null; + } + long writeId = -1; + try { + writeId = Long.parseLong(parts[1]); + } catch (NumberFormatException ex) { + LOG.info("Cannot extract write ID for a MM table: " + file + + "; parsing " + parts[1] + " got " + ex.getMessage()); + return null; + } + return writeId; + } } \ No newline at end of file diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 301159ef0d93..1a85f50bfd1a 100644 --- 
a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -297,7 +297,10 @@ private static URL checkConfigFile(File f) {
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
       HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL,
-      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
+      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS,
+      HiveConf.ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL,
+      HiveConf.ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT,
+      HiveConf.ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT
       };

   /**
@@ -3104,6 +3107,22 @@ public static enum ConfVars {
         "Log tracing id that can be used by upstream clients for tracking respective logs. " +
         "Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."),

+    HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL("hive.metastore.mm.thread.scan.interval", "900s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MM table housekeeping thread interval in this metastore instance. 0 to disable."),
+
+    HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT("hive.metastore.mm.heartbeat.timeout", "1800s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MM write ID times out after this long if a heartbeat is not sent. Currently disabled."), // TODO# heartbeating not implemented
+
+    HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT("hive.metastore.mm.absolute.timeout", "7d",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MM write ID cannot be outstanding for more than this long."),
+
+    HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD("hive.metastore.mm.aborted.grace.period", "1d",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MM write ID will not be removed for that long after it has been aborted;\n" +
+        "this is to work around potential races e.g. with FS visibility, when deleting files."),

     HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list",
         "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role," +
diff --git a/common/src/test/org/apache/hive/common/util/MockFileSystem.java b/common/src/test/org/apache/hive/common/util/MockFileSystem.java
new file mode 100644
index 000000000000..e65fd33d047c
--- /dev/null
+++ b/common/src/test/org/apache/hive/common/util/MockFileSystem.java
@@ -0,0 +1,622 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.apache.hive.common.util; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Progressable; + +public class MockFileSystem extends FileSystem { + final List files = new ArrayList(); + final Map fileStatusMap = new HashMap<>(); + Path workingDir = new Path("/"); + // statics for when the mock fs is created via FileSystem.get + private static String blockedUgi = null; + private final static List globalFiles = new ArrayList(); + protected Statistics statistics; + public boolean allowDelete = false; + + public MockFileSystem() { + // empty + } + + @Override + public void initialize(URI uri, Configuration conf) { + setConf(conf); + statistics = getStatistics("mock", getClass()); + } + + public MockFileSystem(Configuration conf, MockFile... files) { + setConf(conf); + this.files.addAll(Arrays.asList(files)); + statistics = getStatistics("mock", getClass()); + } + + public static void setBlockedUgi(String s) { + blockedUgi = s; + } + + public void clear() { + files.clear(); + } + + @Override + public URI getUri() { + try { + return new URI("mock:///"); + } catch (URISyntaxException err) { + throw new IllegalArgumentException("huh?", err); + } + } + + // increments file modification time + public void touch(MockFile file) { + if (fileStatusMap.containsKey(file)) { + FileStatus fileStatus = fileStatusMap.get(file); + FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), + fileStatus.getReplication(), fileStatus.getBlockSize(), + fileStatus.getModificationTime() + 1, fileStatus.getAccessTime(), + fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), + fileStatus.getPath()); + fileStatusMap.put(file, fileStatusNew); + } + } + + @SuppressWarnings("serial") + public static class MockAccessDenied extends IOException { + } + + @Override + public FSDataInputStream open(Path path, int i) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + MockFile file = findFile(path); + if (file != null) return new FSDataInputStream(new MockInputStream(file)); + throw new IOException("File not found: " + path); + } + + public MockFile findFile(Path path) { + for (MockFile file: files) { + if (file.path.equals(path)) { + return file; + } + } + for (MockFile file: globalFiles) { + if (file.path.equals(path)) { + return file; + } + } + return null; + } + + private void checkAccess() throws IOException { + if (blockedUgi == null) return; + if (!blockedUgi.equals(UserGroupInformation.getCurrentUser().getShortUserName())) return; + throw new MockAccessDenied(); + } + + @Override + public FSDataOutputStream create(Path path, 
FsPermission fsPermission, + boolean overwrite, int bufferSize, + short replication, long blockSize, + Progressable progressable + ) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + MockFile file = findFile(path); + if (file == null) { + file = new MockFile(path.toString(), (int) blockSize, new byte[0]); + files.add(file); + } + return new MockOutputStream(file); + } + + @Override + public FSDataOutputStream append(Path path, int bufferSize, + Progressable progressable + ) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return create(path, FsPermission.getDefault(), true, bufferSize, + (short) 3, 256 * 1024, progressable); + } + + @Override + public boolean rename(Path path, Path path2) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return false; + } + + @Override + public boolean delete(Path path) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return false; + } + + @Override + public boolean delete(Path path, boolean isRecursive) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return allowDelete && isRecursive && deleteMatchingFiles(files, path.toString()); + } + + @Override + public RemoteIterator listLocatedStatus(final Path f) + throws IOException { + return new RemoteIterator() { + private Iterator iterator = listLocatedFileStatuses(f).iterator(); + + @Override + public boolean hasNext() throws IOException { + return iterator.hasNext(); + } + + @Override + public LocatedFileStatus next() throws IOException { + return iterator.next(); + } + }; + } + + private List listLocatedFileStatuses(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + List result = new ArrayList<>(); + String pathname = path.toString(); + String pathnameAsDir = pathname + "/"; + Set dirs = new TreeSet(); + MockFile file = findFile(path); + if (file != null) { + result.add(createLocatedStatus(file)); + return result; + } + findMatchingLocatedFiles(files, pathnameAsDir, dirs, result); + findMatchingLocatedFiles(globalFiles, pathnameAsDir, dirs, result); + // for each directory add it once + for(String dir: dirs) { + result.add(createLocatedDirectory(new MockPath(this, pathnameAsDir + dir))); + } + return result; + } + + @Override + public FileStatus[] listStatus(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + List result = new ArrayList(); + String pathname = path.toString(); + String pathnameAsDir = pathname + "/"; + Set dirs = new TreeSet(); + MockFile file = findFile(path); + if (file != null) { + return new FileStatus[]{createStatus(file)}; + } + findMatchingFiles(files, pathnameAsDir, dirs, result); + findMatchingFiles(globalFiles, pathnameAsDir, dirs, result); + // for each directory add it once + for(String dir: dirs) { + result.add(createDirectory(new MockPath(this, pathnameAsDir + dir))); + } + return result.toArray(new FileStatus[result.size()]); + } + + private void findMatchingFiles( + List files, String pathnameAsDir, Set dirs, List result) { + for (MockFile file: files) { + String filename = file.path.toString(); + if (filename.startsWith(pathnameAsDir)) { + String tail = filename.substring(pathnameAsDir.length()); + int nextSlash = tail.indexOf('/'); + if (nextSlash > 0) { + dirs.add(tail.substring(0, nextSlash)); + } else { + result.add(createStatus(file)); + } + } + } + } + + private boolean deleteMatchingFiles(List files, String 
path) { + Iterator fileIter = files.iterator(); + boolean result = true; + while (fileIter.hasNext()) { + MockFile file = fileIter.next(); + String filename = file.path.toString(); + if (!filename.startsWith(path)) continue; + if (filename.length() <= path.length() || filename.charAt(path.length()) != '/') continue; + if (file.cannotDelete) { + result = false; + continue; + } + assert !file.isDeleted; + file.isDeleted = true; + fileIter.remove(); + } + return result; + } + + private void findMatchingLocatedFiles( + List files, String pathnameAsDir, Set dirs, List result) + throws IOException { + for (MockFile file: files) { + String filename = file.path.toString(); + if (filename.startsWith(pathnameAsDir)) { + String tail = filename.substring(pathnameAsDir.length()); + int nextSlash = tail.indexOf('/'); + if (nextSlash > 0) { + dirs.add(tail.substring(0, nextSlash)); + } else { + result.add(createLocatedStatus(file)); + } + } + } + } + + @Override + public void setWorkingDirectory(Path path) { + workingDir = path; + } + + @Override + public Path getWorkingDirectory() { + return workingDir; + } + + @Override + public boolean mkdirs(Path path, FsPermission fsPermission) { + statistics.incrementWriteOps(1); + return false; + } + + private FileStatus createStatus(MockFile file) { + if (fileStatusMap.containsKey(file)) { + return fileStatusMap.get(file); + } + FileStatus fileStatus = new FileStatus(file.length, false, 1, file.blockSize, 0, 0, + FsPermission.createImmutable((short) 644), "owen", "group", + file.path); + fileStatusMap.put(file, fileStatus); + return fileStatus; + } + + private FileStatus createDirectory(Path dir) { + return new FileStatus(0, true, 0, 0, 0, 0, + FsPermission.createImmutable((short) 755), "owen", "group", dir); + } + + private LocatedFileStatus createLocatedStatus(MockFile file) throws IOException { + FileStatus fileStatus = createStatus(file); + return new LocatedFileStatus(fileStatus, + getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); + } + + private LocatedFileStatus createLocatedDirectory(Path dir) throws IOException { + FileStatus fileStatus = createDirectory(dir); + return new LocatedFileStatus(fileStatus, + getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); + } + + @Override + public FileStatus getFileStatus(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + String pathnameAsDir = path.toString() + "/"; + MockFile file = findFile(path); + if (file != null) return createStatus(file); + for (MockFile dir : files) { + if (dir.path.toString().startsWith(pathnameAsDir)) { + return createDirectory(path); + } + } + for (MockFile dir : globalFiles) { + if (dir.path.toString().startsWith(pathnameAsDir)) { + return createDirectory(path); + } + } + throw new FileNotFoundException("File " + path + " does not exist"); + } + + @Override + public BlockLocation[] getFileBlockLocations(FileStatus stat, + long start, long len) throws IOException { + return getFileBlockLocationsImpl(stat, start, len, true); + } + + private BlockLocation[] getFileBlockLocationsImpl(final FileStatus stat, final long start, + final long len, + final boolean updateStats) throws IOException { + if (updateStats) { + statistics.incrementReadOps(1); + } + checkAccess(); + List result = new ArrayList(); + MockFile file = findFile(stat.getPath()); + if (file != null) { + for(MockBlock block: file.blocks) { + if (getOverlap(block.offset, block.length, start, len) > 0) { + String[] topology = new 
String[block.hosts.length]; + for(int i=0; i < topology.length; ++i) { + topology[i] = "/rack/ " + block.hosts[i]; + } + result.add(new BlockLocation(block.hosts, block.hosts, + topology, block.offset, block.length)); + } + } + return result.toArray(new BlockLocation[result.size()]); + } + return new BlockLocation[0]; + } + + + /** + * Compute the number of bytes that overlap between the two ranges. + * @param offset1 start of range1 + * @param length1 length of range1 + * @param offset2 start of range2 + * @param length2 length of range2 + * @return the number of bytes in the overlap range + */ + private static long getOverlap(long offset1, long length1, long offset2, long length2) { + // c/p from OrcInputFormat + long end1 = offset1 + length1; + long end2 = offset2 + length2; + if (end2 <= offset1 || end1 <= offset2) { + return 0; + } else { + return Math.min(end1, end2) - Math.max(offset1, offset2); + } + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("mockFs{files:["); + for(int i=0; i < files.size(); ++i) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(files.get(i)); + } + buffer.append("]}"); + return buffer.toString(); + } + + public static void addGlobalFile(MockFile mockFile) { + globalFiles.add(mockFile); + } + + public static void clearGlobalFiles() { + globalFiles.clear(); + } + + + public static class MockBlock { + int offset; + int length; + final String[] hosts; + + public MockBlock(String... hosts) { + this.hosts = hosts; + } + + public void setOffset(int offset) { + this.offset = offset; + } + + public void setLength(int length) { + this.length = length; + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("block{offset: "); + buffer.append(offset); + buffer.append(", length: "); + buffer.append(length); + buffer.append(", hosts: ["); + for(int i=0; i < hosts.length; i++) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(hosts[i]); + } + buffer.append("]}"); + return buffer.toString(); + } + } + + public static class MockFile { + public final Path path; + public int blockSize; + public int length; + public MockBlock[] blocks; + public byte[] content; + public boolean cannotDelete = false; + // This is purely for testing convenience; has no bearing on FS operations such as list. + public boolean isDeleted = false; + + public MockFile(String path, int blockSize, byte[] content, + MockBlock... 
blocks) { + this.path = new Path(path); + this.blockSize = blockSize; + this.blocks = blocks; + this.content = content; + this.length = content.length; + int offset = 0; + for(MockBlock block: blocks) { + block.offset = offset; + block.length = Math.min(length - offset, blockSize); + offset += block.length; + } + } + + @Override + public int hashCode() { + return path.hashCode() + 31 * length; + } + + @Override + public boolean equals(final Object obj) { + if (!(obj instanceof MockFile)) { return false; } + return ((MockFile) obj).path.equals(this.path) && ((MockFile) obj).length == this.length; + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("mockFile{path: "); + buffer.append(path.toString()); + buffer.append(", blkSize: "); + buffer.append(blockSize); + buffer.append(", len: "); + buffer.append(length); + buffer.append(", blocks: ["); + for(int i=0; i < blocks.length; i++) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(blocks[i]); + } + buffer.append("]}"); + return buffer.toString(); + } + } + + static class MockInputStream extends FSInputStream { + final MockFile file; + int offset = 0; + + public MockInputStream(MockFile file) throws IOException { + this.file = file; + } + + @Override + public void seek(long offset) throws IOException { + this.offset = (int) offset; + } + + @Override + public long getPos() throws IOException { + return offset; + } + + @Override + public boolean seekToNewSource(long l) throws IOException { + return false; + } + + @Override + public int read() throws IOException { + if (offset < file.length) { + return file.content[offset++] & 0xff; + } + return -1; + } + } + + public static class MockPath extends Path { + private final FileSystem fs; + public MockPath(FileSystem fs, String path) { + super(path); + this.fs = fs; + } + @Override + public FileSystem getFileSystem(Configuration conf) { + return fs; + } + } + + public static class MockOutputStream extends FSDataOutputStream { + public final MockFile file; + + public MockOutputStream(MockFile file) throws IOException { + super(new DataOutputBuffer(), null); + this.file = file; + } + + /** + * Set the blocks and their location for the file. + * Must be called after the stream is closed or the block length will be + * wrong. + * @param blocks the list of blocks + */ + public void setBlocks(MockBlock... 
blocks) { + file.blocks = blocks; + int offset = 0; + int i = 0; + while (offset < file.length && i < blocks.length) { + blocks[i].offset = offset; + blocks[i].length = Math.min(file.length - offset, file.blockSize); + offset += blocks[i].length; + i += 1; + } + } + + @Override + public void close() throws IOException { + super.close(); + DataOutputBuffer buf = (DataOutputBuffer) getWrappedStream(); + file.length = buf.getLength(); + file.content = new byte[file.length]; + MockBlock block = new MockBlock("host1"); + block.setLength(file.length); + setBlocks(block); + System.arraycopy(buf.getData(), 0, file.content, 0, file.length); + } + + @Override + public String toString() { + return "Out stream to " + file.toString(); + } + } + + public void addFile(MockFile file) { + files.add(file); + } +} \ No newline at end of file diff --git a/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql index 88a48f0705ec..cb6e5f6f16f9 100644 --- a/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql +++ b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql @@ -1,6 +1,6 @@ ALTER TABLE "TBLS" ADD "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1; ALTER TABLE "TBLS" ADD "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0; -CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "LAST_HEARTBEAT" BIGINT); +CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL); ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID"); diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql index f86ee4af9413..9da1703dae48 100644 --- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql @@ -112,7 +112,7 @@ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); -CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "LAST_HEARTBEAT" BIGINT); +CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL); ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); diff --git a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql index 5d6f99f59ae8..9666d2be1d44 100644 --- a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql +++ b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql @@ -7,7 +7,8 @@ CREATE TABLE TBL_WRITES TBL_ID BIGINT NOT NULL, WRITE_ID BIGINT NOT NULL, STATE CHAR(1) NOT NULL, - LAST_HEARTBEAT BIGINT + CREATED BIGINT NOT NULL, + LAST_HEARTBEAT BIGINT NOT NULL ); ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 
FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql index 26b2ab3cb2bc..31016e201c31 100644 --- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql @@ -600,7 +600,8 @@ CREATE TABLE TBL_WRITES TBL_ID BIGINT NOT NULL, WRITE_ID BIGINT NOT NULL, STATE CHAR(1) NOT NULL, - LAST_HEARTBEAT BIGINT + CREATED BIGINT NOT NULL, + LAST_HEARTBEAT BIGINT NOT NULL ); ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); diff --git a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql index c024584d5a1d..9e34db2e45c4 100644 --- a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql +++ b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql @@ -7,7 +7,8 @@ CREATE TABLE IF NOT EXISTS `TBL_WRITES` `TBL_ID` BIGINT NOT NULL, `WRITE_ID` BIGINT NOT NULL, `STATE` CHAR(1) NOT NULL, - `LAST_HEARTBEAT` BIGINT, + `CREATED` BIGINT NOT NULL, + `LAST_HEARTBEAT` BIGINT NOT NULL, PRIMARY KEY (`TW_ID`), UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`), CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql index b295950c848f..3e73008f1de5 100644 --- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql @@ -835,7 +835,8 @@ CREATE TABLE IF NOT EXISTS `TBL_WRITES` `TBL_ID` BIGINT NOT NULL, `WRITE_ID` BIGINT NOT NULL, `STATE` CHAR(1) NOT NULL, - `LAST_HEARTBEAT` BIGINT, + `CREATED` BIGINT NOT NULL, + `LAST_HEARTBEAT` BIGINT NOT NULL, PRIMARY KEY (`TW_ID`), UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`), CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) diff --git a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql index 9f6dbb234b88..218eefe07583 100644 --- a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql +++ b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql @@ -7,6 +7,7 @@ CREATE TABLE TBL_WRITES TBL_ID NUMBER NOT NULL, WRITE_ID NUMBER NOT NULL, STATE CHAR(1) NOT NULL, + CREATED NUMBER NOT NULL, LAST_HEARTBEAT NUMBER NOT NULL ); ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql index 6972c2066907..5479712ef109 100644 --- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql @@ -805,6 +805,7 @@ CREATE TABLE TBL_WRITES TBL_ID NUMBER NOT NULL, WRITE_ID NUMBER NOT NULL, STATE CHAR(1) NOT NULL, + CREATED NUMBER NOT NULL, LAST_HEARTBEAT NUMBER NOT NULL ); diff --git a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql index f153837c459e..310f51ec4321 100644 --- a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql +++ b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql @@ -8,6 +8,7 @@ CREATE TABLE "TBL_WRITES" "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, + "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL ); ALTER 
TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql index de997d357404..bc865edfd53d 100644 --- a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql @@ -614,6 +614,7 @@ CREATE TABLE "TBL_WRITES" "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, + "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL ); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index aa6d1eba1022..128e06ab86f4 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -6460,7 +6460,7 @@ public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws Thread.sleep(random.nextInt(deadlockRetryBackoffMs)); } - // Do a separate txn after we have reserved the number. TODO: If we fail, ignore on read. + // Do a separate txn after we have reserved the number. boolean ok = false; ms.openTransaction(); try { @@ -6525,11 +6525,18 @@ public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) startFunction("heartbeat_write_id", " : db=" + dbName + " tbl=" + tblName + " writeId=" + writeId); Exception ex = null; + boolean wasAborted = false; try { boolean ok = false; ms.openTransaction(); try { MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId); + long absTimeout = HiveConf.getTimeVar(getConf(), + ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS); + if (tw.getCreated() + absTimeout < System.currentTimeMillis()) { + tw.setState(String.valueOf(MM_WRITE_ABORTED)); + wasAborted = true; + } tw.setLastHeartbeat(System.currentTimeMillis()); ms.updateTableWrite(tw); ok = true; @@ -6542,6 +6549,7 @@ public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) } finally { endFunction("heartbeat_write_id", ex == null, ex, tblName); } + if (wasAborted) throw new MetaException("The write was aborted due to absolute timeout"); return new HeartbeatWriteIdResult(); } @@ -6576,10 +6584,10 @@ public GetValidWriteIdsResult get_valid_write_ids( long watermarkId = tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1; if (nextId > (watermarkId + 1)) { // There may be some intermediate failed or active writes; get the valid ones. 
- List ids = ms.getWriteIds( + List ids = ms.getTableWriteIds( dbName, tblName, watermarkId, nextId, MM_WRITE_COMMITTED); // TODO: we could optimize here and send the smaller of the lists, and also use ranges - if (ids != null) { + if (!ids.isEmpty()) { Iterator iter = ids.iterator(); long oldWatermarkId = watermarkId; while (iter.hasNext()) { @@ -7057,6 +7065,7 @@ public void run() { startCompactorInitiator(conf); startCompactorWorkers(conf); startCompactorCleaner(conf); + startMmHousekeepingThread(conf); startHouseKeeperService(conf); } catch (Throwable e) { LOG.error("Failure when starting the compactor, compactions may not happen, " + @@ -7096,6 +7105,16 @@ private static void startCompactorCleaner(HiveConf conf) throws Exception { } } + private static void startMmHousekeepingThread(HiveConf conf) throws Exception { + long intervalMs = HiveConf.getTimeVar(conf, + ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL, TimeUnit.MILLISECONDS); + if (intervalMs > 0) { + MetaStoreThread thread = new MmCleanerThread(intervalMs); + initializeAndStartThread(thread, conf); + } + } + + private static MetaStoreThread instantiateThread(String classname) throws Exception { Class c = Class.forName(classname); Object o = c.newInstance(); @@ -7118,6 +7137,7 @@ private static void initializeAndStartThread(MetaStoreThread thread, HiveConf co thread.init(new AtomicBoolean(), new AtomicBoolean()); thread.start(); } + private static void startHouseKeeperService(HiveConf conf) throws Exception { if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { return; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java index a0c8d3b0e3c5..d4d94ffe4bd2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java @@ -51,6 +51,7 @@ public interface MetaStoreThread { * thread should then assure that the loop has been gone completely through at * least once. */ + // TODO: move these test parameters to more specific places... there's no need to have them here void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException; /** diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 41385f7108f4..c2ce2590f59b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1884,4 +1884,14 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) } csNew.setStatsObj(list); } + + public static boolean isMmTable(Table table) { + return isMmTable(table.getParameters()); + } + + public static boolean isMmTable(Map params) { + // TODO: perhaps it should be a 3rd value for 'transactional'? + String value = params.get(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java new file mode 100644 index 000000000000..6a7f588bb8c9 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java @@ -0,0 +1,397 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ValidWriteIds; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.RawStore.FullTableName; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.model.MTableWrite; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Supplier; + +public class MmCleanerThread extends Thread implements MetaStoreThread { + private final static Logger LOG = LoggerFactory.getLogger(MmCleanerThread.class); + private HiveConf conf; + private int threadId; + private AtomicBoolean stop; + private long intervalMs; + private long heartbeatTimeoutMs, absTimeoutMs, abortedGraceMs; + /** Time override for tests. Only used for MM timestamp logic, not for the thread timing. */ + private Supplier timeOverride = null; + + public MmCleanerThread(long intervalMs) { + this.intervalMs = intervalMs; + } + + @VisibleForTesting + void overrideTime(Supplier timeOverride) { + this.timeOverride = timeOverride; + } + + private long getTimeMs() { + return timeOverride == null ? System.currentTimeMillis() : timeOverride.get(); + } + + @Override + public void setHiveConf(HiveConf conf) { + this.conf = conf; + heartbeatTimeoutMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT, TimeUnit.MILLISECONDS); + absTimeoutMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS); + abortedGraceMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD, TimeUnit.MILLISECONDS); + if (heartbeatTimeoutMs > absTimeoutMs) { + throw new RuntimeException("Heartbeat timeout " + heartbeatTimeoutMs + + " cannot be larger than the absolute timeout " + absTimeoutMs); + } + } + + @Override + public void setThreadId(int threadId) { + this.threadId = threadId; + } + + @Override + public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException { + this.stop = stop; + setPriority(MIN_PRIORITY); + setDaemon(true); + } + + @Override + public void run() { + // Only get RS here, when we are already on the thread. 
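+    // (Stores obtained via RawStoreProxy are bound to the thread that creates them,
+    // which is why this cannot be done in init().)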
+    RawStore rs = getRs();
+    while (true) {
+      if (checkStop()) return;
+      long endTimeNs = System.nanoTime() + intervalMs * 1000000L;
+
+      runOneIteration(rs);
+
+      if (checkStop()) return;
+      long waitTimeMs = (endTimeNs - System.nanoTime()) / 1000000L;
+      if (waitTimeMs <= 0) continue;
+      try {
+        Thread.sleep(waitTimeMs);
+      } catch (InterruptedException e) {
+        LOG.error("Thread was interrupted and will now exit");
+        return;
+      }
+    }
+  }
+
+  private RawStore getRs() {
+    try {
+      return RawStoreProxy.getProxy(conf, conf,
+          conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), threadId);
+    } catch (MetaException e) {
+      LOG.error("Failed to get RawStore; the thread will now die", e);
+      throw new RuntimeException(e);
+    }
+  }
+
+  private boolean checkStop() {
+    if (!stop.get()) return false;
+    LOG.info("Stopping due to an external request");
+    return true;
+  }
+
+  @VisibleForTesting
+  void runOneIteration(RawStore rs) {
+    // We only get the names here; we want to get and process each table in a separate DB txn.
+    List<FullTableName> mmTables = null;
+    try {
+      mmTables = rs.getAllMmTablesForCleanup();
+    } catch (MetaException e) {
+      LOG.error("Failed to get tables", e);
+      return;
+    }
+    for (FullTableName tableName : mmTables) {
+      try {
+        processOneTable(tableName, rs);
+      } catch (MetaException e) {
+        LOG.error("Failed to process " + tableName, e);
+      }
+    }
+  }
+
+  private void processOneTable(FullTableName table, RawStore rs) throws MetaException {
+    // 1. Time out writes that have been running for a while.
+    //    a) Heartbeat timeouts (not enabled right now, as heartbeat is not implemented).
+    //    b) Absolute timeouts.
+    //    c) Gaps that have the next ID and the derived absolute timeout. This is a small special
+    //       case that can happen if we increment next ID but fail to insert the write ID record,
+    //       which we do in separate txns to avoid making the conflict-prone increment txn longer.
+    LOG.info("Processing table " + table);
+    Table t = rs.getTable(table.dbName, table.tblName);
+    HashSet<Long> removeWriteIds = new HashSet<>(), cleanupOnlyWriteIds = new HashSet<>();
+    getWritesThatAreReadyForCleanUp(t, table, rs, removeWriteIds, cleanupOnlyWriteIds);
+
+    // 2. Delete the aborted writes' files from the FS.
+    deleteAbortedWriteIdFiles(table, rs, t, removeWriteIds);
+    deleteAbortedWriteIdFiles(table, rs, t, cleanupOnlyWriteIds);
+    // Both sets now only contain the writes whose files were fully cleaned up.
+
+    // 3. Advance the watermark.
+    advanceWatermark(table, rs, removeWriteIds);
+  }
+
+  private void getWritesThatAreReadyForCleanUp(Table t, FullTableName table, RawStore rs,
+      HashSet<Long> removeWriteIds, HashSet<Long> cleanupOnlyWriteIds) throws MetaException {
+    // We will generally ignore errors here. First, we expect some conflicts; second, we will get
+    // the final view of things after we do (or try, at any rate) all the updates.
+    long watermarkId = t.isSetMmWatermarkWriteId() ? t.getMmWatermarkWriteId() : -1,
+        nextWriteId = t.isSetMmNextWriteId() ? t.getMmNextWriteId() : 0;
+    long now = getTimeMs(), earliestOkHeartbeatMs = now - heartbeatTimeoutMs,
+        earliestOkCreateMs = now - absTimeoutMs, latestAbortedMs = now - abortedGraceMs;
+
+    List<MTableWrite> writes = rs.getTableWrites(
+        table.dbName, table.tblName, watermarkId, nextWriteId);
+    ListIterator<MTableWrite> iter = writes.listIterator(writes.size());
+    long expectedId = -1, nextCreated = -1;
+    // We will go in reverse order and add aborted writes for the gaps that have a following
+    // write ID that would imply that the previous one (created earlier) would have already
+    // expired, had it been open and not updated.
+    while (iter.hasPrevious()) {
+      MTableWrite write = iter.previous();
+      addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, write.getWriteId(),
+          nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now);
+      expectedId = write.getWriteId() - 1;
+      nextCreated = write.getCreated();
+      char state = write.getState().charAt(0);
+      if (state == HiveMetaStore.MM_WRITE_ABORTED) {
+        if (write.getLastHeartbeat() < latestAbortedMs) {
+          removeWriteIds.add(write.getWriteId());
+        } else {
+          cleanupOnlyWriteIds.add(write.getWriteId());
+        }
+      } else if (state == HiveMetaStore.MM_WRITE_OPEN && write.getCreated() < earliestOkCreateMs) {
+        // TODO: also check for heartbeat here.
+        if (expireTimedOutWriteId(rs, table.dbName, table.tblName, write.getWriteId(),
+            now, earliestOkCreateMs, earliestOkHeartbeatMs, cleanupOnlyWriteIds)) {
+          cleanupOnlyWriteIds.add(write.getWriteId());
+        }
+      }
+    }
+    addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, watermarkId,
+        nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now);
+  }
+
+  private void advanceWatermark(
+      FullTableName table, RawStore rs, HashSet<Long> cleanedUpWriteIds) {
+    if (!rs.openTransaction()) {
+      LOG.error("Cannot open transaction");
+      return;
+    }
+    boolean success = false;
+    try {
+      Table t = rs.getTable(table.dbName, table.tblName);
+      if (t == null) {
+        rs.rollbackTransaction(); // The table is gone; don't leak the open txn.
+        return;
+      }
+      long watermarkId = t.getMmWatermarkWriteId();
+      List<Long> writeIds = rs.getTableWriteIds(table.dbName, table.tblName, watermarkId,
+          t.getMmNextWriteId(), HiveMetaStore.MM_WRITE_COMMITTED);
+      long expectedId = watermarkId + 1;
+      boolean hasGap = false;
+      Iterator<Long> idIter = writeIds.iterator();
+      while (idIter.hasNext()) {
+        long next = idIter.next();
+        if (next < expectedId) continue;
+        while (next > expectedId) {
+          if (!cleanedUpWriteIds.contains(expectedId)) {
+            hasGap = true;
+            break;
+          }
+          ++expectedId;
+        }
+        if (hasGap) break;
+        ++expectedId;
+      }
+      // Make sure we also advance over the trailing aborted ones.
+      if (!hasGap) {
+        while (cleanedUpWriteIds.contains(expectedId)) {
+          ++expectedId;
+        }
+      }
+      long newWatermarkId = expectedId - 1;
+      if (newWatermarkId > watermarkId) {
+        t.setMmWatermarkWriteId(newWatermarkId);
+        rs.alterTable(table.dbName, table.tblName, t);
+        rs.deleteTableWrites(table.dbName, table.tblName, -1, expectedId);
+      }
+      success = true;
+    } catch (Exception ex) {
+      // TODO: should we try a couple times on conflicts? Aborted writes cannot be unaborted.
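+      // Until then, a failed advance simply waits for the next cleaner iteration to retry.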
+ LOG.error("Failed to advance watermark", ex); + rs.rollbackTransaction(); + } + if (success) { + tryCommit(rs); + } + } + + private void deleteAbortedWriteIdFiles( + FullTableName table, RawStore rs, Table t, HashSet cleanUpWriteIds) { + if (cleanUpWriteIds.isEmpty()) return; + if (t.getPartitionKeysSize() > 0) { + for (String location : rs.getAllPartitionLocations(table.dbName, table.tblName)) { + deleteAbortedWriteIdFiles(location, cleanUpWriteIds); + } + } else { + deleteAbortedWriteIdFiles(t.getSd().getLocation(), cleanUpWriteIds); + } + } + + private void deleteAbortedWriteIdFiles(String location, HashSet abortedWriteIds) { + LOG.info("Looking for " + abortedWriteIds.size() + " aborted write output in " + location); + Path path = new Path(location); + FileSystem fs; + FileStatus[] files; + try { + fs = path.getFileSystem(conf); + if (!fs.exists(path)) { + LOG.warn(path + " does not exist; assuming that the cleanup is not needed."); + return; + } + // TODO# do we need to account for any subdirectories here? decide after special-case jiras + files = fs.listStatus(path); + } catch (Exception ex) { + LOG.error("Failed to get files for " + path + "; cannot ensure cleanup for any writes"); + abortedWriteIds.clear(); + return; + } + for (FileStatus file : files) { + Path childPath = file.getPath(); + if (!file.isDirectory()) { + LOG.warn("Skipping a non-directory file " + childPath); + continue; + } + Long writeId = ValidWriteIds.extractWriteId(childPath); + if (writeId == null) { + LOG.warn("Skipping an unknown directory " + childPath); + continue; + } + if (!abortedWriteIds.contains(writeId.longValue())) continue; + try { + if (!fs.delete(childPath, true)) throw new IOException("delete returned false"); + } catch (Exception ex) { + LOG.error("Couldn't delete " + childPath + "; not cleaning up " + writeId, ex); + abortedWriteIds.remove(writeId.longValue()); + } + } + } + + private boolean expireTimedOutWriteId(RawStore rs, String dbName, + String tblName, long writeId, long now, long earliestOkCreatedMs, + long earliestOkHeartbeatMs, HashSet cleanupOnlyWriteIds) { + if (!rs.openTransaction()) { + return false; + } + try { + MTableWrite tw = rs.getTableWrite(dbName, tblName, writeId); + if (tw == null) { + // The write have been updated since the time when we thought it has expired. + tryCommit(rs); + return true; + } + char state = tw.getState().charAt(0); + if (state != HiveMetaStore.MM_WRITE_OPEN + || (tw.getCreated() > earliestOkCreatedMs + && tw.getLastHeartbeat() > earliestOkHeartbeatMs)) { + tryCommit(rs); + return true; // The write has been updated since the time when we thought it has expired. + } + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_ABORTED)); + tw.setLastHeartbeat(now); + rs.updateTableWrite(tw); + } catch (Exception ex) { + LOG.error("Failed to update an expired table write", ex); + rs.rollbackTransaction(); + return false; + } + boolean result = tryCommit(rs); + if (result) { + cleanupOnlyWriteIds.add(writeId); + } + return result; + } + + private boolean tryCommit(RawStore rs) { + try { + return rs.commitTransaction(); + } catch (Exception ex) { + LOG.error("Failed to commit transaction", ex); + return false; + } + } + + private boolean addTimedOutMissingWriteIds(RawStore rs, String dbName, String tblName, + long foundPrevId, long nextCreated, long expectedId, long earliestOkHeartbeatMs, + HashSet cleanupOnlyWriteIds, long now) throws MetaException { + // Assume all missing ones are created at the same time as the next present write ID. 
+    // We also assume missing writes never had any heartbeats.
+    if (nextCreated >= earliestOkHeartbeatMs || expectedId < 0) return true;
+    Table t = null;
+    List<Long> localCleanupOnlyWriteIds = new ArrayList<>();
+    while (foundPrevId < expectedId) {
+      if (t == null && !rs.openTransaction()) {
+        LOG.error("Cannot open transaction; skipping");
+        return false;
+      }
+      try {
+        if (t == null) {
+          t = rs.getTable(dbName, tblName);
+        }
+        // We don't need to double check if the write exists; the unique index will cause an error.
+        rs.createTableWrite(t, expectedId, HiveMetaStore.MM_WRITE_ABORTED, now);
+      } catch (Exception ex) {
+        // TODO: don't log conflict exceptions?.. although we barely ever expect them.
+        LOG.error("Failed to create a missing table write", ex);
+        rs.rollbackTransaction();
+        return false;
+      }
+      localCleanupOnlyWriteIds.add(expectedId);
+      --expectedId;
+    }
+    boolean result = (t == null || tryCommit(rs));
+    if (result) {
+      cleanupOnlyWriteIds.addAll(localCleanupOnlyWriteIds);
+    }
+    return result;
+  }
+}
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index fb3b1ada9bba..32e4daff85c2 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -711,7 +711,7 @@ public Database getDatabase(String name) throws NoSuchObjectException {
   }
 
   public Database getDatabaseInternal(String name) throws MetaException, NoSuchObjectException {
-    return new GetDbHelper(name, null, true, true) {
+    return new GetDbHelper(name, true, true) {
       @Override
       protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
         return directSql.getDatabase(dbName);
@@ -1183,14 +1183,7 @@ private List<MTableWrite> listAllTableWrites(String dbName, String tableName) {
       pm.retrieveAll(result);
       success = true;
     } finally {
-      if (success) {
-        commitTransaction();
-      } else {
-        rollbackTransaction();
-      }
-      if (query != null) {
-        query.closeAll();
-      }
+      closeTransaction(success, query);
     }
     return result;
   }
@@ -2951,15 +2944,13 @@ protected String describeResult() {
   public abstract class GetDbHelper extends GetHelper<Database> {
     /**
      * GetHelper for returning db info using directSql/JDO.
-     * Since this is a db-level call, tblName is ignored, and null is passed irrespective of what is passed in.
      * @param dbName The Database Name
-     * @param tblName Placeholder param to match signature, always ignored.
      * @param allowSql Whether or not we allow DirectSQL to perform this query.
      * @param allowJdo Whether or not we allow ORM to perform this query.
     * @throws MetaException
      */
     public GetDbHelper(
-        String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException {
+        String dbName, boolean allowSql, boolean allowJdo) throws MetaException {
       super(dbName,null,allowSql,allowJdo);
     }
@@ -8713,7 +8704,7 @@ public void createTableWrite(Table tbl, long writeId, char state, long heartbeat
     openTransaction();
     try {
       MTable mtbl = getMTable(tbl.getDbName(), tbl.getTableName());
-      MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat);
+      MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat, heartbeat);
       pm.makePersistent(tw);
       success = true;
     } finally {
@@ -8746,8 +8737,8 @@ public MTableWrite getTableWrite(
       String dbName, String tblName, long writeId) throws MetaException {
     boolean success = false;
     Query query = null;
+    openTransaction();
     try {
-      openTransaction();
       query = pm.newQuery(MTableWrite.class,
           "table.tableName == t1 && table.database.name == t2 && writeId == t3");
       query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3");
@@ -8762,45 +8753,129 @@ public MTableWrite getTableWrite(
       }
       return writes.get(0);
     } finally {
-      if (success) {
-        commitTransaction();
-      } else {
-        rollbackTransaction();
-      }
-      if (query != null) {
-        query.closeAll();
-      }
+      closeTransaction(success, query);
     }
   }
 
   @Override
-  public List<Long> getWriteIds(String dbName, String tblName,
+  public List<Long> getTableWriteIds(String dbName, String tblName,
       long watermarkId, long nextWriteId, char state) throws MetaException {
     boolean success = false;
     Query query = null;
+    openTransaction();
     try {
-      openTransaction();
+      boolean hasState = (state != '\0');
       query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite"
-          + " where table.tableName == t1 && table.database.name == t2 && writeId >= t3"
-          + " && writeId < t4 && state == t5");
+          + " where table.tableName == t1 && table.database.name == t2 && writeId > t3"
+          + " && writeId < t4" + (hasState ? " && state == t5" : ""));
       query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3, "
-          + "java.lang.Long t4, java.lang.String t5");
+          + "java.lang.Long t4" + (hasState ? ", java.lang.String t5" : ""));
       query.setResult("writeId");
       query.setOrdering("writeId asc");
       @SuppressWarnings("unchecked")
-      List<Long> writes = (List<Long>) query.executeWithArray(
-          tblName, dbName, watermarkId, nextWriteId, String.valueOf(state));
+      List<Long> writes = (List<Long>) (hasState
+          ? query.executeWithArray(tblName, dbName, watermarkId, nextWriteId, String.valueOf(state))
+          : query.executeWithArray(tblName, dbName, watermarkId, nextWriteId));
+      success = true;
+      return (writes == null) ? new ArrayList<Long>() : new ArrayList<>(writes);
+    } finally {
+      closeTransaction(success, query);
+    }
+  }
+
+  @Override
+  public List<MTableWrite> getTableWrites(
+      String dbName, String tblName, long from, long to) throws MetaException {
+    boolean success = false;
+    Query query = null;
+    openTransaction();
+    try {
+      query = pm.newQuery(MTableWrite.class,
+          "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4");
+      query.declareParameters(
+          "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4");
+      query.setOrdering("writeId asc");
+      @SuppressWarnings("unchecked")
+      List<MTableWrite> writes =
+          (List<MTableWrite>) query.executeWithArray(tblName, dbName, from, to);
       success = true;
       return (writes == null || writes.isEmpty()) ? null : new ArrayList<>(writes);
    } finally {
-      if (success) {
-        commitTransaction();
-      } else {
-        rollbackTransaction();
-      }
-      if (query != null) {
-        query.closeAll();
+      closeTransaction(success, query);
+    }
+  }
+
+
+  @Override
+  public void deleteTableWrites(
+      String dbName, String tblName, long from, long to) throws MetaException {
+    boolean success = false;
+    Query query = null;
+    openTransaction();
+    try {
+      query = pm.newQuery(MTableWrite.class,
+          "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4");
+      query.declareParameters(
+          "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4");
+      query.deletePersistentAll(tblName, dbName, from, to);
+      success = true;
+    } finally {
+      closeTransaction(success, query);
+    }
+  }
+
+  @Override
+  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
+    boolean success = false;
+    Query query = null;
+    openTransaction();
+    try {
+      // If the table had no MM writes, there's nothing to clean up.
+      query = pm.newQuery(MTable.class, "mmNextWriteId > 0");
+      @SuppressWarnings("unchecked")
+      List<MTable> tables = (List<MTable>) query.execute();
+      pm.retrieveAll(tables);
+      ArrayList<FullTableName> result = new ArrayList<>(tables.size());
+      for (MTable table : tables) {
+        if (MetaStoreUtils.isMmTable(table.getParameters())) {
+          result.add(new FullTableName(table.getDatabase().getName(), table.getTableName()));
+        }
+      }
+      success = true;
+      return result;
+    } finally {
+      closeTransaction(success, query);
+    }
+  }
+
+  @Override
+  public Collection<String> getAllPartitionLocations(String dbName, String tblName) {
+    boolean success = false;
+    Query query = null;
+    openTransaction();
+    try {
+      String q = "select sd.location from org.apache.hadoop.hive.metastore.model.MPartition"
+          + " where table.tableName == t1 && table.database.name == t2";
+      query = pm.newQuery(q); // Pass the query string and bind the declared parameters.
+      query.declareParameters("java.lang.String t1, java.lang.String t2");
+      @SuppressWarnings("unchecked")
+      List<String> locations = (List<String>) query.execute(tblName, dbName);
+      success = true;
+      return new ArrayList<>(locations);
+    } finally {
+      closeTransaction(success, query);
+    }
+  }
+
+  private void closeTransaction(boolean success, Query query) {
+    if (success) {
+      commitTransaction();
+    } else {
+      rollbackTransaction();
+    }
+    if (query != null) {
+      query.closeAll();
+    }
+  }
 }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 170c07d4c4c9..76ead25f60b1 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -23,6 +23,7 @@
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 import java.nio.ByteBuffer;
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 
@@ -697,5 +698,28 @@ void createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
 
   void createTableWrite(Table tbl, long writeId, char state, long heartbeat);
 
-  List<Long> getWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException;
+  List<Long> getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException;
+
+
+  public static final class FullTableName {
+    public final String dbName, tblName;
+
+    public FullTableName(String dbName, String tblName) {
+      this.dbName = dbName;
+      this.tblName = tblName;
+    }
+
+    @Override
+    public String toString() {
+      return dbName + "." + tblName;
+    }
+  }
+
+  List<FullTableName> getAllMmTablesForCleanup() throws MetaException;
+
+  public List<MTableWrite> getTableWrites(String dbName, String tblName, long from, long to) throws MetaException;
+
+  Collection<String> getAllPartitionLocations(String dbName, String tblName);
+
+  void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException;
 }
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index 829f0aeda699..ddc5a629a56c 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -2759,9 +2759,36 @@ public MTableWrite getTableWrite(String dbName, String tblName, long writeId) {
 
   @Override
-  public List<Long> getWriteIds(
+  public List<Long> getTableWriteIds(
       String dbName, String tblName, long watermarkId, long nextWriteId, char state) {
     // TODO: Auto-generated method stub
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public List<FullTableName> getAllMmTablesForCleanup() throws MetaException {
+    // TODO: Auto-generated method stub
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public List<MTableWrite> getTableWrites(String dbName, String tblName,
+      long from, long to) throws MetaException {
+    // TODO: Auto-generated method stub
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Collection<String> getAllPartitionLocations(String dbName,
+      String tblName) {
+    // TODO: Auto-generated method stub
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void deleteTableWrites(String dbName, String tblName, long from,
+      long to) throws MetaException {
+    // TODO: Auto-generated method stub
+    throw new UnsupportedOperationException();
+  }
 }
diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
index a7e5f3e00046..b7f398a1841f 100644
--- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
+++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java
@@ -23,14 +23,16 @@ public class MTableWrite {
   private long writeId;
   private String state;
   private long lastHeartbeat;
+  private long created;
 
   public MTableWrite() {}
 
-  public MTableWrite(MTable table, long writeId, String state, long lastHeartbeat) {
+  public MTableWrite(MTable table, long writeId, String state, long lastHeartbeat, long created) {
     this.table = table;
     this.writeId = writeId;
     this.state = state;
     this.lastHeartbeat = lastHeartbeat;
+    this.created = created;
   }
 
   public MTable getTable() {
@@ -49,6 +51,10 @@ public long getLastHeartbeat() {
     return lastHeartbeat;
   }
 
+  public long getCreated() {
+    return created;
+  }
+
   public void setTable(MTable table) {
     this.table = table;
   }
@@ -64,4 +70,8 @@ public void setState(String state) {
   public void setLastHeartbeat(long lastHeartbeat) {
     this.lastHeartbeat = lastHeartbeat;
   }
+
+  public void setCreated(long created) {
+    this.created = created;
+  }
 }
diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo
index bd71056faea8..ce101dd002ec 100644
--- a/metastore/src/model/package.jdo
+++ b/metastore/src/model/package.jdo
@@ -1082,6 +1082,9 @@
+      <field name="created">
+        <column name="CREATED" jdbc-type="BIGINT" allows-null="false"/>
+      </field>
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 98c543fe988f..acbbf4e40919 100644
---
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -886,8 +887,30 @@ public Boolean commitTransactionExpectDeadlock() { } @Override - public List getWriteIds( + public List getTableWriteIds( String dbName, String tblName, long watermarkId, long nextWriteId, char state) { return null; } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + return null; + } + + @Override + public List getTableWrites(String dbName, String tblName, + long from, long to) throws MetaException { + return null; + } + + @Override + public Collection getAllPartitionLocations(String dbName, + String tblName) { + return null; + } + + @Override + public void deleteTableWrites(String dbName, String tblName, long from, + long to) throws MetaException { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 8e54b1629304..787c1f066199 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.nio.ByteBuffer; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -898,10 +899,32 @@ public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { } @Override - public List getWriteIds( + public List getTableWriteIds( String dbName, String tblName, long watermarkId, long nextWriteId, char state) { return null; } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + return null; + } + + @Override + public List getTableWrites(String dbName, String tblName, + long from, long to) throws MetaException { + return null; + } + + @Override + public Collection getAllPartitionLocations(String dbName, + String tblName) { + return null; + } + + @Override + public void deleteTableWrites(String dbName, String tblName, long from, + long to) throws MetaException { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index 04971591a680..a8d34955120f 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.hive.metastore; +import static org.junit.Assert.*; + import java.util.Arrays; import java.util.HashMap; import java.util.List; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics; import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting; import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; 
import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -42,9 +46,13 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockFile; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -53,6 +61,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Supplier; + public class TestObjectStore { private ObjectStore objectStore = null; @@ -67,6 +77,15 @@ public class TestObjectStore { private static final String ROLE2 = "testobjectstorerole2"; private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName()); + private static final class LongSupplier implements Supplier { + public long value = 0; + + @Override + public Long get() { + return value; + } + } + public static class MockPartitionExpressionProxy implements PartitionExpressionProxy { @Override public String convertExprToFilter(byte[] expr) throws MetaException { @@ -142,7 +161,7 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSu public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + StorageDescriptor sd = createFakeSd("location"); HashMap params = new HashMap(); params.put("EXTERNAL", "false"); Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); @@ -164,6 +183,156 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO objectStore.dropDatabase(DB1); } + + + /** + * Test table operations + */ + @Test + public void testMmCleaner() throws Exception { + HiveConf conf = new HiveConf(); + conf.set(ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT.varname, "3ms"); + conf.set(ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT.varname, "20ms"); + conf.set(ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD.varname, "5ms"); + conf.set("fs.mock.impl", MockFileSystem.class.getName()); + + MockFileSystem mfs = (MockFileSystem)(new Path("mock:///").getFileSystem(conf)); + mfs.clear(); + mfs.allowDelete = true; + // Don't add the files just yet... 
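+    // (they are added below, one by one, as the corresponding writes are simulated.)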
+ MockFile[] files = new MockFile[9]; + for (int i = 0; i < files.length; ++i) { + files[i] = new MockFile("mock:/foo/mm_" + i + "/1", 0, new byte[0]); + } + + LongSupplier time = new LongSupplier(); + + MmCleanerThread mct = new MmCleanerThread(0); + mct.setHiveConf(conf); + mct.overrideTime(time); + + Database db1 = new Database(DB1, "description", "locationurl", null); + objectStore.createDatabase(db1); + StorageDescriptor sd = createFakeSd("mock:/foo"); + HashMap params = new HashMap(); + params.put("EXTERNAL", "false"); + params.put(hive_metastoreConstants.TABLE_IS_MM, "true"); + Table tbl = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, + null, params, null, null, "MANAGED_TABLE"); + objectStore.createTable(tbl); + + // Add write #0 so the watermark wouldn't advance; skip write #1, add #2 at 0, skip #3 + createCompleteTableWrite(mfs, files, 0, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + mfs.addFile(files[1]); + createCompleteTableWrite(mfs, files, 2, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + mfs.addFile(files[3]); + tbl.setMmNextWriteId(4); + objectStore.alterTable(DB1, TABLE1, tbl); + + mct.runOneIteration(objectStore); + List writes = getAbortedWrites(); + assertEquals(0, writes.size()); // Missing write is not aborted before timeout. + time.value = 4; // Advance time. + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); // Missing write is aborted after timeout. + assertEquals(1L, writes.get(0).longValue()); + checkDeletedSet(files, 1); + // However, write #3 was not aborted as we cannot determine when it will time out. + createCompleteTableWrite(mfs, files, 4, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + time.value = 8; + // It will now be aborted, since we have a following write. + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(2, writes.size()); + assertTrue(writes.contains(Long.valueOf(3))); + checkDeletedSet(files, 1, 3); + + // Commit #0 and #2 and confirm that the watermark advances. + // It will only advance over #1, since #3 was aborted at 8 and grace period has not passed. + time.value = 10; + MTableWrite tw = objectStore.getTableWrite(DB1, TABLE1, 0); + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED)); + objectStore.updateTableWrite(tw); + tw = objectStore.getTableWrite(DB1, TABLE1, 2); + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED)); + objectStore.updateTableWrite(tw); + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); + assertEquals(3L, writes.get(0).longValue()); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(2L, tbl.getMmWatermarkWriteId()); + + // Now advance the time and see that watermark also advances over #3. + time.value = 16; + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(0, writes.size()); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(3L, tbl.getMmWatermarkWriteId()); + + // Check that the open write gets aborted after some time; then the watermark advances. + time.value = 25; + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); + assertEquals(4L, writes.get(0).longValue()); + time.value = 31; + mct.runOneIteration(objectStore); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(4L, tbl.getMmWatermarkWriteId()); + checkDeletedSet(files, 1, 3, 4); // The other two should still be deleted. + + // Finally check that we cannot advance watermark if cleanup fails for some file. 
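+    // (MockFile.cannotDelete makes the mock FS report a failed delete for that directory.)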
+ createCompleteTableWrite(mfs, files, 5, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + createCompleteTableWrite(mfs, files, 6, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + createCompleteTableWrite(mfs, files, 7, time, tbl, HiveMetaStore.MM_WRITE_COMMITTED); + createCompleteTableWrite(mfs, files, 8, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + time.value = 37; // Skip the grace period. + files[6].cannotDelete = true; + mct.runOneIteration(objectStore); + checkDeletedSet(files, 1, 3, 4, 5, 8); // The other two should still be deleted. + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(5L, tbl.getMmWatermarkWriteId()); // Watermark only goes up to 5. + files[6].cannotDelete = false; + mct.runOneIteration(objectStore); + checkDeletedSet(files, 1, 3, 4, 5, 6, 8); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(8L, tbl.getMmWatermarkWriteId()); // Now it advances all the way. + + objectStore.dropTable(DB1, TABLE1); + objectStore.dropDatabase(DB1); + } + + private void createCompleteTableWrite(MockFileSystem mfs, MockFile[] files, + int id, LongSupplier time, Table tbl, char state) throws MetaException, InvalidObjectException { + objectStore.createTableWrite(tbl, id, state, time.value); + mfs.addFile(files[id]); + tbl.setMmNextWriteId(id + 1); + objectStore.alterTable(DB1, TABLE1, tbl); + } + + private void checkDeletedSet(MockFile[] files, int... deleted) { + for (int id : deleted) { + assertTrue("File " + id + " not deleted", files[id].isDeleted); + } + int count = 0; + for (MockFile file : files) { + if (file.isDeleted) ++count; + } + assertEquals(deleted.length, count); // Make sure nothing else is deleted. + } + + private List getAbortedWrites() throws MetaException { + return objectStore.getTableWriteIds(DB1, TABLE1, -1, 10, HiveMetaStore.MM_WRITE_ABORTED); + } + + private StorageDescriptor createFakeSd(String location) { + return new StorageDescriptor(null, location, null, null, false, 0, + new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + } + /** * Tests partition operations @@ -172,7 +341,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + StorageDescriptor sd = createFakeSd("location"); HashMap tableParams = new HashMap(); tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, ""); @@ -265,7 +434,7 @@ public void testDirectSqlErrorMetrics() throws Exception { MetricsFactory.init(conf); CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance(); - objectStore.new GetDbHelper("foo", null, true, true) { + objectStore.new GetDbHelper("foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { return null; @@ -282,7 +451,7 @@ protected Database getJdoResult(ObjectStore.GetHelper ctx) throws Meta MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DIRECTSQL_ERRORS, ""); - objectStore.new GetDbHelper("foo", null, true, true) { + objectStore.new GetDbHelper("foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { 
throw new RuntimeException(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 45a80e3497ef..0e3035aaceaa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -1493,7 +1493,8 @@ private static Table extractMmTable(ReadEntity input) { break; default: return null; } - return (t != null && !t.isTemporary() && AcidUtils.isMmTable(t)) ? t : null; + return (t != null && !t.isTemporary() + && MetaStoreUtils.isMmTable(t.getParameters())) ? t : null; } private CommandProcessorResponse rollback(CommandProcessorResponse cpr) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index 70b129e86208..57051456eccd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.metadata.Table; @@ -1082,12 +1083,6 @@ public static boolean isAcidTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } - public static boolean isMmTable(Table table) { - // TODO: perhaps it should be a 3rd value for 'transactional'? - String value = table.getProperty(hive_metastoreConstants.TABLE_IS_MM); - return value != null && value.equalsIgnoreCase("true"); - } - /** * Sets the acidOperationalProperties in the configuration object argument. 
* @param conf Mutable configuration object diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index f3609df7f8c2..10d3f0e54c84 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1478,7 +1478,7 @@ public void loadSinglePartition(Path loadPath, String tableName, boolean hasFollowingStatsTask, Long mmWriteId) throws HiveException { Table tbl = getTable(tableName); boolean isMmTableWrite = (mmWriteId != null); - Preconditions.checkState(isMmTableWrite == AcidUtils.isMmTable(tbl)); + Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isMmTable(tbl.getParameters())); loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs, isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId); if (isMmTableWrite) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index a0ce3a6cce27..fa7c29b466a5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6569,7 +6569,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } boolean isNonNativeTable = dest_tab.isNonNative(); - isMmTable = AcidUtils.isMmTable(dest_tab); + isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters()); if (isNonNativeTable || isMmTable) { queryTmpdir = dest_path; } else { @@ -6642,7 +6642,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri() .getAuthority(), partPath.toUri().getPath()); - isMmTable = AcidUtils.isMmTable(dest_tab); + isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters()); queryTmpdir = isMmTable ? 
dest_path : ctx.getTempDirForPath(dest_path); Utilities.LOG14535.info("createFS for partition specifying " + queryTmpdir + " from " + dest_path); table_desc = Utilities.getTableDesc(dest_tab); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index 4d6e24ef3821..92d9f28c0f18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -65,7 +65,6 @@ public void setHiveConf(HiveConf conf) { @Override public void setThreadId(int threadId) { this.threadId = threadId; - } @Override diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java index a7ff9a374944..0d177bef5591 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java @@ -33,10 +33,10 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockPath; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockFile; +import org.apache.hive.common.util.MockFileSystem.MockPath; import org.junit.Assert; import org.junit.Test; @@ -179,7 +179,7 @@ public void testOriginalDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_050_100/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delta_101_101/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); List obsolete = dir.getObsolete(); @@ -221,7 +221,7 @@ public void testBaseDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_050_105/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delta_90_120/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); @@ -517,7 +517,7 @@ public void testBaseWithDeleteDeltas() throws Exception { new MockFile("mock:/tbl/part1/delete_delta_050_105/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delete_delta_110_110/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 2c1bb6fe2f09..28a4f9db0130 100644 
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -21,10 +21,7 @@ import java.io.DataInput; import java.io.DataOutput; -import java.io.FileNotFoundException; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.sql.Date; import java.sql.Timestamp; @@ -32,27 +29,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.TimeZone; -import java.util.TreeSet; import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BlockLocation; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; @@ -113,7 +99,11 @@ import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Progressable; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockBlock; +import org.apache.hive.common.util.MockFileSystem.MockFile; +import org.apache.hive.common.util.MockFileSystem.MockOutputStream; +import org.apache.hive.common.util.MockFileSystem.MockPath; import org.apache.orc.OrcProto; import org.junit.Before; import org.junit.Rule; @@ -921,534 +911,6 @@ private List> createSplitStrategies( null, null, true); } - public static class MockBlock { - int offset; - int length; - final String[] hosts; - - public MockBlock(String... hosts) { - this.hosts = hosts; - } - - public void setOffset(int offset) { - this.offset = offset; - } - - public void setLength(int length) { - this.length = length; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("block{offset: "); - buffer.append(offset); - buffer.append(", length: "); - buffer.append(length); - buffer.append(", hosts: ["); - for(int i=0; i < hosts.length; i++) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(hosts[i]); - } - buffer.append("]}"); - return buffer.toString(); - } - } - - public static class MockFile { - final Path path; - int blockSize; - int length; - MockBlock[] blocks; - byte[] content; - - public MockFile(String path, int blockSize, byte[] content, - MockBlock... 
blocks) { - this.path = new Path(path); - this.blockSize = blockSize; - this.blocks = blocks; - this.content = content; - this.length = content.length; - int offset = 0; - for(MockBlock block: blocks) { - block.offset = offset; - block.length = Math.min(length - offset, blockSize); - offset += block.length; - } - } - - @Override - public int hashCode() { - return path.hashCode() + 31 * length; - } - - @Override - public boolean equals(final Object obj) { - if (!(obj instanceof MockFile)) { return false; } - return ((MockFile) obj).path.equals(this.path) && ((MockFile) obj).length == this.length; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("mockFile{path: "); - buffer.append(path.toString()); - buffer.append(", blkSize: "); - buffer.append(blockSize); - buffer.append(", len: "); - buffer.append(length); - buffer.append(", blocks: ["); - for(int i=0; i < blocks.length; i++) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(blocks[i]); - } - buffer.append("]}"); - return buffer.toString(); - } - } - - static class MockInputStream extends FSInputStream { - final MockFile file; - int offset = 0; - - public MockInputStream(MockFile file) throws IOException { - this.file = file; - } - - @Override - public void seek(long offset) throws IOException { - this.offset = (int) offset; - } - - @Override - public long getPos() throws IOException { - return offset; - } - - @Override - public boolean seekToNewSource(long l) throws IOException { - return false; - } - - @Override - public int read() throws IOException { - if (offset < file.length) { - return file.content[offset++] & 0xff; - } - return -1; - } - } - - public static class MockPath extends Path { - private final FileSystem fs; - public MockPath(FileSystem fs, String path) { - super(path); - this.fs = fs; - } - @Override - public FileSystem getFileSystem(Configuration conf) { - return fs; - } - } - - public static class MockOutputStream extends FSDataOutputStream { - private final MockFile file; - - public MockOutputStream(MockFile file) throws IOException { - super(new DataOutputBuffer(), null); - this.file = file; - } - - /** - * Set the blocks and their location for the file. - * Must be called after the stream is closed or the block length will be - * wrong. - * @param blocks the list of blocks - */ - public void setBlocks(MockBlock... 
blocks) { - file.blocks = blocks; - int offset = 0; - int i = 0; - while (offset < file.length && i < blocks.length) { - blocks[i].offset = offset; - blocks[i].length = Math.min(file.length - offset, file.blockSize); - offset += blocks[i].length; - i += 1; - } - } - - @Override - public void close() throws IOException { - super.close(); - DataOutputBuffer buf = (DataOutputBuffer) getWrappedStream(); - file.length = buf.getLength(); - file.content = new byte[file.length]; - MockBlock block = new MockBlock("host1"); - block.setLength(file.length); - setBlocks(block); - System.arraycopy(buf.getData(), 0, file.content, 0, file.length); - } - - @Override - public String toString() { - return "Out stream to " + file.toString(); - } - } - - public static class MockFileSystem extends FileSystem { - final List files = new ArrayList(); - final Map fileStatusMap = new HashMap<>(); - Path workingDir = new Path("/"); - // statics for when the mock fs is created via FileSystem.get - private static String blockedUgi = null; - private final static List globalFiles = new ArrayList(); - protected Statistics statistics; - - public MockFileSystem() { - // empty - } - - @Override - public void initialize(URI uri, Configuration conf) { - setConf(conf); - statistics = getStatistics("mock", getClass()); - } - - public MockFileSystem(Configuration conf, MockFile... files) { - setConf(conf); - this.files.addAll(Arrays.asList(files)); - statistics = getStatistics("mock", getClass()); - } - - public static void setBlockedUgi(String s) { - blockedUgi = s; - } - - void clear() { - files.clear(); - } - - @Override - public URI getUri() { - try { - return new URI("mock:///"); - } catch (URISyntaxException err) { - throw new IllegalArgumentException("huh?", err); - } - } - - // increments file modification time - public void touch(MockFile file) { - if (fileStatusMap.containsKey(file)) { - FileStatus fileStatus = fileStatusMap.get(file); - FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), - fileStatus.getReplication(), fileStatus.getBlockSize(), - fileStatus.getModificationTime() + 1, fileStatus.getAccessTime(), - fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), - fileStatus.getPath()); - fileStatusMap.put(file, fileStatusNew); - } - } - - @SuppressWarnings("serial") - public static class MockAccessDenied extends IOException { - } - - @Override - public FSDataInputStream open(Path path, int i) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - MockFile file = findFile(path); - if (file != null) return new FSDataInputStream(new MockInputStream(file)); - throw new IOException("File not found: " + path); - } - - private MockFile findFile(Path path) { - for (MockFile file: files) { - if (file.path.equals(path)) { - return file; - } - } - for (MockFile file: globalFiles) { - if (file.path.equals(path)) { - return file; - } - } - return null; - } - - private void checkAccess() throws IOException { - if (blockedUgi == null) return; - if (!blockedUgi.equals(UserGroupInformation.getCurrentUser().getShortUserName())) return; - throw new MockAccessDenied(); - } - - @Override - public FSDataOutputStream create(Path path, FsPermission fsPermission, - boolean overwrite, int bufferSize, - short replication, long blockSize, - Progressable progressable - ) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - MockFile file = findFile(path); - if (file == null) { - file = new MockFile(path.toString(), (int) blockSize, new byte[0]); - 
files.add(file); - } - return new MockOutputStream(file); - } - - @Override - public FSDataOutputStream append(Path path, int bufferSize, - Progressable progressable - ) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return create(path, FsPermission.getDefault(), true, bufferSize, - (short) 3, 256 * 1024, progressable); - } - - @Override - public boolean rename(Path path, Path path2) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public boolean delete(Path path) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public boolean delete(Path path, boolean b) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public RemoteIterator listLocatedStatus(final Path f) - throws IOException { - return new RemoteIterator() { - private Iterator iterator = listLocatedFileStatuses(f).iterator(); - - @Override - public boolean hasNext() throws IOException { - return iterator.hasNext(); - } - - @Override - public LocatedFileStatus next() throws IOException { - return iterator.next(); - } - }; - } - - private List listLocatedFileStatuses(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - List result = new ArrayList<>(); - String pathname = path.toString(); - String pathnameAsDir = pathname + "/"; - Set dirs = new TreeSet(); - MockFile file = findFile(path); - if (file != null) { - result.add(createLocatedStatus(file)); - return result; - } - findMatchingLocatedFiles(files, pathnameAsDir, dirs, result); - findMatchingLocatedFiles(globalFiles, pathnameAsDir, dirs, result); - // for each directory add it once - for(String dir: dirs) { - result.add(createLocatedDirectory(new MockPath(this, pathnameAsDir + dir))); - } - return result; - } - - @Override - public FileStatus[] listStatus(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - List result = new ArrayList(); - String pathname = path.toString(); - String pathnameAsDir = pathname + "/"; - Set dirs = new TreeSet(); - MockFile file = findFile(path); - if (file != null) { - return new FileStatus[]{createStatus(file)}; - } - findMatchingFiles(files, pathnameAsDir, dirs, result); - findMatchingFiles(globalFiles, pathnameAsDir, dirs, result); - // for each directory add it once - for(String dir: dirs) { - result.add(createDirectory(new MockPath(this, pathnameAsDir + dir))); - } - return result.toArray(new FileStatus[result.size()]); - } - - private void findMatchingFiles( - List files, String pathnameAsDir, Set dirs, List result) { - for (MockFile file: files) { - String filename = file.path.toString(); - if (filename.startsWith(pathnameAsDir)) { - String tail = filename.substring(pathnameAsDir.length()); - int nextSlash = tail.indexOf('/'); - if (nextSlash > 0) { - dirs.add(tail.substring(0, nextSlash)); - } else { - result.add(createStatus(file)); - } - } - } - } - - private void findMatchingLocatedFiles( - List files, String pathnameAsDir, Set dirs, List result) - throws IOException { - for (MockFile file: files) { - String filename = file.path.toString(); - if (filename.startsWith(pathnameAsDir)) { - String tail = filename.substring(pathnameAsDir.length()); - int nextSlash = tail.indexOf('/'); - if (nextSlash > 0) { - dirs.add(tail.substring(0, nextSlash)); - } else { - result.add(createLocatedStatus(file)); - } - } - } - 
} - - @Override - public void setWorkingDirectory(Path path) { - workingDir = path; - } - - @Override - public Path getWorkingDirectory() { - return workingDir; - } - - @Override - public boolean mkdirs(Path path, FsPermission fsPermission) { - statistics.incrementWriteOps(1); - return false; - } - - private FileStatus createStatus(MockFile file) { - if (fileStatusMap.containsKey(file)) { - return fileStatusMap.get(file); - } - FileStatus fileStatus = new FileStatus(file.length, false, 1, file.blockSize, 0, 0, - FsPermission.createImmutable((short) 644), "owen", "group", - file.path); - fileStatusMap.put(file, fileStatus); - return fileStatus; - } - - private FileStatus createDirectory(Path dir) { - return new FileStatus(0, true, 0, 0, 0, 0, - FsPermission.createImmutable((short) 755), "owen", "group", dir); - } - - private LocatedFileStatus createLocatedStatus(MockFile file) throws IOException { - FileStatus fileStatus = createStatus(file); - return new LocatedFileStatus(fileStatus, - getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); - } - - private LocatedFileStatus createLocatedDirectory(Path dir) throws IOException { - FileStatus fileStatus = createDirectory(dir); - return new LocatedFileStatus(fileStatus, - getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); - } - - @Override - public FileStatus getFileStatus(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - String pathnameAsDir = path.toString() + "/"; - MockFile file = findFile(path); - if (file != null) return createStatus(file); - for (MockFile dir : files) { - if (dir.path.toString().startsWith(pathnameAsDir)) { - return createDirectory(path); - } - } - for (MockFile dir : globalFiles) { - if (dir.path.toString().startsWith(pathnameAsDir)) { - return createDirectory(path); - } - } - throw new FileNotFoundException("File " + path + " does not exist"); - } - - @Override - public BlockLocation[] getFileBlockLocations(FileStatus stat, - long start, long len) throws IOException { - return getFileBlockLocationsImpl(stat, start, len, true); - } - - private BlockLocation[] getFileBlockLocationsImpl(final FileStatus stat, final long start, - final long len, - final boolean updateStats) throws IOException { - if (updateStats) { - statistics.incrementReadOps(1); - } - checkAccess(); - List result = new ArrayList(); - MockFile file = findFile(stat.getPath()); - if (file != null) { - for(MockBlock block: file.blocks) { - if (OrcInputFormat.SplitGenerator.getOverlap(block.offset, - block.length, start, len) > 0) { - String[] topology = new String[block.hosts.length]; - for(int i=0; i < topology.length; ++i) { - topology[i] = "/rack/ " + block.hosts[i]; - } - result.add(new BlockLocation(block.hosts, block.hosts, - topology, block.offset, block.length)); - } - } - return result.toArray(new BlockLocation[result.size()]); - } - return new BlockLocation[0]; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("mockFs{files:["); - for(int i=0; i < files.size(); ++i) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(files.get(i)); - } - buffer.append("]}"); - return buffer.toString(); - } - - public static void addGlobalFile(MockFile mockFile) { - globalFiles.add(mockFile); - } - - public static void clearGlobalFiles() { - globalFiles.clear(); - } - } - static void fill(DataOutputBuffer out, long length) throws IOException { for(int i=0; i < length; ++i) { out.write(0); @@ 
-2569,6 +2031,7 @@ public void testDoAs() throws Exception {
     conf.setClass("fs.mock.impl", MockFileSystem.class, FileSystem.class);
     String badUser = UserGroupInformation.getCurrentUser().getShortUserName() + "-foo";
     MockFileSystem.setBlockedUgi(badUser);
+    // TODO: could we instead get FS from path here and add normal files for every UGI?
     MockFileSystem.clearGlobalFiles();
     OrcInputFormat.Context.resetThreadPool(); // We need the size above to take effect.
     try {

From ad3df23b9e9ecf0ecbee11b1a143658364b45e16 Mon Sep 17 00:00:00 2001
From: Sergey Shelukhin
Date: Mon, 3 Oct 2016 16:43:45 -0700
Subject: [PATCH 06/24] HIVE-14641 : handle writing to dynamic partitions
 (Sergey Shelukhin)

---
 .../hadoop/hive/ql/exec/FileSinkOperator.java | 128 ++---
 .../apache/hadoop/hive/ql/exec/MoveTask.java  |   3 +-
 .../apache/hadoop/hive/ql/metadata/Hive.java  |   9 +-
 ql/src/test/queries/clientpositive/mm_all.q   | 162 ++++--
 .../test/queries/clientpositive/mm_current.q  |  37 +-
 .../results/clientpositive/llap/mm_all.q.out  | 467 ++++++++++++++++++
 .../clientpositive/llap/mm_current.q.out      | 235 +++------
 7 files changed, 749 insertions(+), 292 deletions(-)
 create mode 100644 ql/src/test/results/clientpositive/llap/mm_all.q.out

diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 6ea1a98fc1b5..f11a7c376c60 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -225,11 +225,7 @@ public void closeWriters(boolean abort) throws HiveException {
     }
   }
 
-  private void commit(FileSystem fs) throws HiveException {
-    List<Path> commitPaths = null;
-    if (isMmTable) {
-      commitPaths = new ArrayList<>();
-    }
+  private void commit(FileSystem fs, List<Path> commitPaths) throws HiveException {
     for (int idx = 0; idx < outPaths.length; ++idx) {
       try {
         commitOneOutPath(idx, fs, commitPaths);
@@ -238,21 +234,6 @@ private void commit(FileSystem fs) throws HiveException {
             outPaths[idx] + " to: " + finalPaths[idx], e);
       }
     }
-    if (isMmTable) {
-      Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(
-          conf.getMmWriteId()) + "_" + taskId + MANIFEST_EXTENSION);
-      Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths);
-      try {
-        try (FSDataOutputStream out = fs.create(manifestPath)) {
-          out.writeInt(commitPaths.size());
-          for (Path path : commitPaths) {
-            out.writeUTF(path.toString());
-          }
-        }
-      } catch (IOException e) {
-        throw new HiveException(e);
-      }
-    }
   }
 
   private void commitOneOutPath(int idx, FileSystem fs, List<Path> commitPaths)
@@ -328,8 +309,9 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT
         if (!bDynParts && !isSkewedStoredAsSubDirectories) {
           finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension);
         } else {
-          // TODO# wrong! special case #N bucketing
-          finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension);
+          // TODO# does this need extra special handling for bucketing?
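+          // For illustration (hypothetical values): with specPath /wh/t and dynamic
+          // partition subdir key2=238, tmpPath is /wh/t/key2=238, so the MM file is
+          // written directly to its final partition location, with no _tmp staging.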
+          // Note: tmpPath here has the correct partition key
+          finalPaths[filesIdx] = getFinalPath(subdirPath, tmpPath, extension);
         }
         outPaths[filesIdx] = finalPaths[filesIdx];
       }
@@ -921,7 +903,7 @@ private FSPaths createNewPaths(String dirName) throws HiveException {
     FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); // TODO# this will break
     fsp2.configureDynPartPath(dirName, childSpecPathDynLinkedPartitions);
     Utilities.LOG14535.info("creating new paths for " + dirName + ", childSpec " + childSpecPathDynLinkedPartitions
-        + ": tmpPath " + fsp2.getTmpPath() + ", task path " + fsp2.getTaskOutputTempPath());
+        + ": tmpPath " + fsp2.getTmpPath() + ", task path " + fsp2.getTaskOutputTempPath()/*, new Exception()*/);
     if(!conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) {
       createBucketFiles(fsp2);
       valToPaths.put(dirName, fsp2);
@@ -1104,6 +1086,7 @@ public void closeOp(boolean abort) throws HiveException {
         throw new HiveException(e);
       }
     }
+    List<Path> commitPaths = new ArrayList<>();
     for (FSPaths fsp : valToPaths.values()) {
       fsp.closeWriters(abort);
       // before closing the operator check if statistics gathering is requested
@@ -1139,7 +1122,27 @@ public void closeOp(boolean abort) throws HiveException {
       }
 
       if (isNativeTable) {
-        fsp.commit(fs);
+        fsp.commit(fs, commitPaths);
+      }
+    }
+    if (!commitPaths.isEmpty()) {
+      Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(
+          conf.getMmWriteId()) + "_" + taskId + MANIFEST_EXTENSION);
+      Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths);
+      try {
+        // Don't overwrite the manifest... should fail if we have collisions.
+        // We assume one FSOP per task (per specPath), so we create it in specPath.
+        try (FSDataOutputStream out = fs.create(manifestPath, false)) {
+          if (out == null) {
+            throw new HiveException("Failed to create manifest at " + manifestPath);
+          }
+          out.writeInt(commitPaths.size());
+          for (Path path : commitPaths) {
+            out.writeUTF(path.toString());
+          }
+        }
+      } catch (IOException e) {
+        throw new HiveException(e);
       }
     }
     // Only publish stats if this operator's flag was set to gather stats
@@ -1197,30 +1200,27 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
       DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter)
           throws IOException, HiveException {
     FileSystem fs = specPath.getFileSystem(hconf);
-    int targetLevel = (dpCtx == null) ? 1 : dpCtx.getNumDPCols();
+    // Manifests would be at the root level, but the results at target level.
+    // TODO# special case - doesn't take bucketing into account
+    int targetLevel = (dpCtx == null) ? 1 : (dpCtx.getNumDPCols() + 1);
+    int manifestLevel = 1;
+    ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true);
     if (!success) {
-      FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
-          new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true));
-      for (FileStatus status : statuses) {
-        Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
-        tryDelete(fs, status.getPath());
-      }
+      deleteMatchingFiles(specPath, fs, targetLevel, filter);
+      deleteMatchingFiles(specPath, fs, manifestLevel, filter);
       return;
     }
-    FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
-        new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true));
-    if (statuses == null) return;
-    LinkedList<FileStatus> results = new LinkedList<>();
-    List<Path> manifests = new ArrayList<>(statuses.length);
-    for (FileStatus status : statuses) {
-      if (status.getPath().getName().endsWith(MANIFEST_EXTENSION)) {
-        manifests.add(status.getPath());
-      } else if (!status.isDirectory()) {
-        Path path = status.getPath();
-        Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
-        tryDelete(fs, path);
-      } else {
-        results.addAll(Lists.newArrayList(fs.listStatus(status.getPath())));
+    FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(specPath, manifestLevel, fs, filter);
+    List<Path> manifests = new ArrayList<>(files.length);
+    if (files != null) {
+      for (FileStatus status : files) {
+        if (status.getPath().getName().endsWith(MANIFEST_EXTENSION)) {
+          manifests.add(status.getPath());
+        } else if (!status.isDirectory()) {
+          Path path = status.getPath();
+          Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
+          tryDelete(fs, path);
+        }
      }
    }
    HashSet<String> committed = new HashSet<>();
@@ -1235,18 +1235,27 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
        }
      }
    }
-    Iterator<FileStatus> iter = results.iterator();
-    while (iter.hasNext()) {
-      FileStatus rfs = iter.next();
-      if (!committed.remove(rfs.getPath().toString())) {
-        iter.remove();
-        Utilities.LOG14535.info("Deleting " + rfs.getPath() + " that was not committed");
-        // We should actually succeed here - if we fail, don't commit the query.
-        if (!fs.delete(rfs.getPath(), true)) {
-          throw new HiveException("Failed to delete an uncommitted path " + rfs.getPath());
+
+    files = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs, filter);
+    LinkedList<FileStatus> results = new LinkedList<>();
+    for (FileStatus status : files) {
+      if (!status.isDirectory()) {
+        Path path = status.getPath();
+        Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
+        tryDelete(fs, path);
+      } else {
+        for (FileStatus child : fs.listStatus(status.getPath())) {
+          Path path = child.getPath();
+          if (committed.remove(path.toString())) continue; // A good file.
+          Utilities.LOG14535.info("Deleting " + path + " that was not committed");
+          // We should actually succeed here - if we fail, don't commit the query.
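+          // A false return from delete() is treated like an exception here, so an
+          // uncommitted file cannot silently survive a successful query.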
+          if (!fs.delete(path, true)) {
+            throw new HiveException("Failed to delete an uncommitted path " + path);
+          }
+        }
+      }
+    }
+
     if (!committed.isEmpty()) {
       throw new HiveException("The following files were committed but not found: " + committed);
     }
@@ -1258,7 +1267,7 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
     if (results.isEmpty()) return;
 
     FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]);
-    // TODO# dp will break - removeTempOrDuplicateFiles assumes dirs in results. Why? We recurse...
+    // TODO# dp may break - removeTempOrDuplicateFiles assumes dirs in results. Why? We recurse...
     List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
         fs, finalResults, dpCtx, conf, hconf);
     // create empty buckets if necessary
@@ -1267,6 +1276,15 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
     }
   }
 
+  private void deleteMatchingFiles(Path specPath, FileSystem fs,
+      int targetLevel, ValidWriteIds.IdPathFilter filter) throws IOException {
+    for (FileStatus status : HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
+        filter)) {
+      Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
+      tryDelete(fs, status.getPath());
+    }
+  }
+
   private void tryDelete(FileSystem fs, Path path) {
     try {
       fs.delete(path, true);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 3be21c469d73..538bf79b3aa6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -435,7 +435,8 @@ private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd,
           isSkewedStoredAsDirs(tbd),
           work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
           SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
-          work.getLoadTableWork().getWriteType());
+          work.getLoadTableWork().getWriteType(),
+          tbd.getMmWriteId());
 
       console.printInfo("\t Time taken to load dynamic partitions: "  +
           (System.currentTimeMillis() - startTime)/1000.0 + " seconds");
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 68d59aab6060..38b434dcddf2 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1850,7 +1850,7 @@ private Set<Path> getValidPartitionsInPath(int numDP, Path loadPath) throws Hive
   public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath,
       final String tableName, final Map<String, String> partSpec, final boolean replace,
       final int numDP, final boolean listBucketingEnabled, final boolean isAcid, final long txnId,
-      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation)
+      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, final Long mmWriteId)
       throws HiveException {
 
     final Map<Map<String, String>, Partition> partitionsMap =
@@ -1895,7 +1895,7 @@ public Void call() throws Exception {
             Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName());
             Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, replace, true, listBucketingEnabled,
-                false, isAcid, hasFollowingStatsTask, null); // TODO# special case #N
+                false, isAcid, hasFollowingStatsTask, mmWriteId);
             partitionsMap.put(fullPartSpec, newPartition);
 
             if (inPlaceEligible) {
@@ -1927,7 +1927,10 @@ public Void call() throws Exception {
       for (Future<Void> future : futures) {
        future.get();
} - // TODO# special case #N - DP - we would commit the txn to metastore here + if (mmWriteId != null) { + // Commit after we have processed all the partitions. + commitMmTableWrite(tbl, mmWriteId); + } } catch (InterruptedException | ExecutionException e) { LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); //cancel other futures diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index aaf8d48770ae..59171afdfb76 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -1,63 +1,117 @@ set hive.mapred.mode=nonstrict; set hive.explain.user=false; -set hive.exec.dynamic.partition.mode=nonstrict; set hive.fetch.task.conversion=none; -drop table simple_mm; -drop table partunion_mm; -drop table merge_mm; -drop table ctas_mm; -drop table T1; -drop table T2; -drop table skew_mm; - - -create table simple_mm(key int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); -insert into table simple_mm partition(key_mm='455') select key from src limit 3; - -create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; - -create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); - - -insert into table partunion_mm partition(key_mm) -select temps.* from ( -select key as key_mm, key from ctas_mm -union all -select key as key_mm, key from simple_mm ) temps; +-- Force multiple writers when reading +drop table intermediate; +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select key from src limit 2; +insert into table intermediate partition(p='456') select key from src limit 2; + +drop table part_mm; +create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); +explain insert into table part_mm partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='456') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +select * from part_mm; +drop table part_mm; -set hive.merge.mapredfiles=true; -set hive.merge.sparkfiles=true; -set hive.merge.tezfiles=true; - -CREATE TABLE merge_mm (key INT, value STRING) - PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); - -EXPLAIN -INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) - SELECT key, value, PMOD(HASH(key), 2) as part - FROM src; - -INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) - SELECT key, value, PMOD(HASH(key), 2) as part - FROM src; - - -set hive.optimize.skewjoin.compiletime = true; --- the test case is wrong? +drop table simple_mm; +create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); +insert into table simple_mm select key from intermediate; +insert overwrite table simple_mm select key from intermediate; +select * from simple_mm; +drop table simple_mm; -CREATE TABLE T1(key STRING, val STRING) -SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; -CREATE TABLE T2(key STRING, val STRING) -SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; -LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; -EXPLAIN -SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +-- simple DP (no bucketing, no sorting?) 
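+-- Expected outcome: both tables end up with partitions key1=123/key2=238 and
+-- key1=123/key2=86, and dp_mm contents match dp_no_mm row for row (see mm_all.q.out).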
+drop table dp_no_mm; +drop table dp_mm; -create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); -INSERT OVERWRITE TABLE skew_mm -SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; +set hive.exec.dynamic.partition.mode=nonstrict; --- TODO load, acid, etc +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc; +create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true'); + +insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate; + +insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; + +select * from dp_no_mm; +select * from dp_mm; + +drop table dp_no_mm; +drop table dp_mm; + + + +-- future + + + + + +--drop table partunion_mm; +--drop table merge_mm; +--drop table ctas_mm; +--drop table T1; +--drop table T2; +--drop table skew_mm; +-- +-- +--create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; +-- +--create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); +-- +-- +--insert into table partunion_mm partition(key_mm) +--select temps.* from ( +--select key as key_mm, key from ctas_mm +--union all +--select key as key_mm, key from simple_mm ) temps; +-- +--set hive.merge.mapredfiles=true; +--set hive.merge.sparkfiles=true; +--set hive.merge.tezfiles=true; +-- +--CREATE TABLE merge_mm (key INT, value STRING) +-- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); +-- +--EXPLAIN +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +-- +--set hive.optimize.skewjoin.compiletime = true; +---- the test case is wrong? 
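+---- (likely because skew_mm below is SKEWED BY (key), a column it does not have)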
+-- +--CREATE TABLE T1(key STRING, val STRING) +--SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +--CREATE TABLE T2(key STRING, val STRING) +--SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +-- +--EXPLAIN +--SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +-- +--create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); +--INSERT OVERWRITE TABLE skew_mm +--SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; +-- +---- TODO load, acid, etc +-- +-- + +drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index 7c3e13807b1e..b551176e990b 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -6,29 +6,34 @@ set tez.grouping.min-size=1; set tez.grouping.max-size=2; set hive.tez.auto.reducer.parallelism=false; -drop table part_mm; -drop table simple_mm; drop table intermediate; - create table intermediate(key int) partitioned by (p int) stored as orc; insert into table intermediate partition(p='455') select key from src limit 2; insert into table intermediate partition(p='456') select key from src limit 2; - -create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); -explain insert into table part_mm partition(key_mm='455') select key from intermediate; -insert into table part_mm partition(key_mm='455') select key from intermediate; -insert into table part_mm partition(key_mm='456') select key from intermediate; -insert into table part_mm partition(key_mm='455') select key from intermediate; -select * from part_mm; -create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); -insert into table simple_mm select key from intermediate; -insert overwrite table simple_mm select key from intermediate; -select * from simple_mm; +drop table dp_no_mm; +drop table dp_mm; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc; +create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true'); + +insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate; + +insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; + +select * from dp_no_mm; +select * from dp_mm; + +drop table dp_no_mm; +drop table dp_mm; -drop table part_mm; -drop table simple_mm; drop table intermediate; + diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out new file mode 100644 index 000000000000..b0c9c0aebaaf --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -0,0 +1,467 @@ +PREHOOK: query: -- Force multiple writers when reading +drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Force multiple writers when reading +drop table intermediate +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table 
intermediate(key int) partitioned by (p int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part_mm +POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part_mm +PREHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +POSTHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: intermediate + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.part_mm + Execution mode: llap + LLAP IO: all inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + key_mm 455 + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.part_mm + micromanaged table: true + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: 
default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from part_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +238 455 +86 455 +238 455 +86 455 +238 455 +86 455 +238 455 +86 455 +238 456 +86 456 +238 456 +86 456 +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@part_mm +PREHOOK: Output: default@part_mm +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@part_mm +POSTHOOK: Output: default@part_mm +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@simple_mm +POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@simple_mm +PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 
+PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert overwrite table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert overwrite table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from simple_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +238 +86 +238 +86 +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@simple_mm +PREHOOK: Output: default@simple_mm +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@simple_mm +POSTHOOK: Output: default@simple_mm +PREHOOK: query: -- simple DP (no bucketing, no sorting?) +drop table dp_no_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- simple DP (no bucketing, no sorting?) 
+drop table dp_no_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table dp_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table dp_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dp_no_mm +POSTHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dp_no_mm +PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dp_mm +POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dp_mm +PREHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@dp_no_mm@key1=123 +POSTHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=238 +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=86 +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=238).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=86).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@dp_mm@key1=123 +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@dp_mm@key1=123/key2=238 +POSTHOOK: Output: default@dp_mm@key1=123/key2=86 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=238).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=86).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from dp_no_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@dp_no_mm +PREHOOK: Input: default@dp_no_mm@key1=123/key2=238 +PREHOOK: Input: default@dp_no_mm@key1=123/key2=86 +#### A masked pattern was here #### +POSTHOOK: query: select * from dp_no_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dp_no_mm +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=238 +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=86 +#### A masked pattern was here #### +238 123 238 +238 123 238 +86 123 86 +86 123 86 +PREHOOK: query: select * from dp_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@dp_mm 
+PREHOOK: Input: default@dp_mm@key1=123/key2=238 +PREHOOK: Input: default@dp_mm@key1=123/key2=86 +#### A masked pattern was here #### +POSTHOOK: query: select * from dp_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dp_mm +POSTHOOK: Input: default@dp_mm@key1=123/key2=238 +POSTHOOK: Input: default@dp_mm@key1=123/key2=86 +#### A masked pattern was here #### +238 123 238 +238 123 238 +86 123 86 +86 123 86 +PREHOOK: query: drop table dp_no_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dp_no_mm +PREHOOK: Output: default@dp_no_mm +POSTHOOK: query: drop table dp_no_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dp_no_mm +POSTHOOK: Output: default@dp_no_mm +PREHOOK: query: drop table dp_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dp_mm +PREHOOK: Output: default@dp_mm +POSTHOOK: query: drop table dp_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dp_mm +POSTHOOK: Output: default@dp_mm +PREHOOK: query: -- future + + + + + +--drop table partunion_mm; +--drop table merge_mm; +--drop table ctas_mm; +--drop table T1; +--drop table T2; +--drop table skew_mm; +-- +-- +--create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; +-- +--create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); +-- +-- +--insert into table partunion_mm partition(key_mm) +--select temps.* from ( +--select key as key_mm, key from ctas_mm +--union all +--select key as key_mm, key from simple_mm ) temps; +-- +--set hive.merge.mapredfiles=true; +--set hive.merge.sparkfiles=true; +--set hive.merge.tezfiles=true; +-- +--CREATE TABLE merge_mm (key INT, value STRING) +-- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); +-- +--EXPLAIN +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +-- +--set hive.optimize.skewjoin.compiletime = true; +---- the test case is wrong? 
+-- +--CREATE TABLE T1(key STRING, val STRING) +--SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +--CREATE TABLE T2(key STRING, val STRING) +--SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +-- +--EXPLAIN +--SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +-- +--create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); +--INSERT OVERWRITE TABLE skew_mm +--SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; +-- +---- TODO load, acid, etc +-- +-- + +drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: -- future + + + + + +--drop table partunion_mm; +--drop table merge_mm; +--drop table ctas_mm; +--drop table T1; +--drop table T2; +--drop table skew_mm; +-- +-- +--create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; +-- +--create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); +-- +-- +--insert into table partunion_mm partition(key_mm) +--select temps.* from ( +--select key as key_mm, key from ctas_mm +--union all +--select key as key_mm, key from simple_mm ) temps; +-- +--set hive.merge.mapredfiles=true; +--set hive.merge.sparkfiles=true; +--set hive.merge.tezfiles=true; +-- +--CREATE TABLE merge_mm (key INT, value STRING) +-- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); +-- +--EXPLAIN +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +--INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) +-- SELECT key, value, PMOD(HASH(key), 2) as part +-- FROM src; +-- +-- +--set hive.optimize.skewjoin.compiletime = true; +---- the test case is wrong? 
+-- +--CREATE TABLE T1(key STRING, val STRING) +--SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; +--CREATE TABLE T2(key STRING, val STRING) +--SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; +--LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; +-- +--EXPLAIN +--SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; +-- +--create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); +--INSERT OVERWRITE TABLE skew_mm +--SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; +-- +---- TODO load, acid, etc +-- +-- + +drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index ece6cbffc87a..fe1caee92507 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -1,11 +1,3 @@ -PREHOOK: query: drop table part_mm -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table part_mm -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table simple_mm -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table simple_mm -POSTHOOK: type: DROPTABLE PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE POSTHOOK: query: drop table intermediate @@ -36,193 +28,110 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: query: drop table dp_no_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table dp_no_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table dp_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table dp_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@part_mm -POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: Output: default@dp_no_mm +POSTHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@part_mm -PREHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate -PREHOOK: type: QUERY -POSTHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate -POSTHOOK: type: QUERY -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: intermediate - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 
4 Data size: 48 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part_mm - Execution mode: llap - LLAP IO: all inputs - - Stage: Stage-2 - Dependency Collection - - Stage: Stage-0 - Move Operator - tables: - partition: - key_mm 455 - replace: false - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.part_mm - micromanaged table: true - - Stage: Stage-3 - Stats-Aggr Operator - -PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@part_mm@key_mm=456 -POSTHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@part_mm@key_mm=456 -POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@part_mm@key_mm=455 -POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from part_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@part_mm -PREHOOK: Input: default@part_mm@key_mm=455 -PREHOOK: Input: default@part_mm@key_mm=456 -#### A masked pattern was here #### -POSTHOOK: query: select * from part_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@part_mm -POSTHOOK: Input: default@part_mm@key_mm=455 -POSTHOOK: Input: default@part_mm@key_mm=456 -#### A masked pattern was here #### -0 455 -455 455 -0 455 -455 455 -0 455 -455 455 -0 455 -455 455 -0 456 -455 456 -0 456 -455 456 -PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: Output: 
default@dp_no_mm +PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@simple_mm -POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: Output: default@dp_mm +POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@simple_mm -PREHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: Output: default@dp_mm +PREHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@simple_mm -POSTHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: Output: default@dp_no_mm@key1=123 +POSTHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@simple_mm -POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: insert overwrite table simple_mm select key from intermediate +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=0 +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=455 +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@simple_mm -POSTHOOK: query: insert overwrite table simple_mm select key from intermediate +PREHOOK: Output: default@dp_mm@key1=123 +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@simple_mm -POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from simple_mm +POSTHOOK: Output: default@dp_mm@key1=123/key2=0 +POSTHOOK: Output: default@dp_mm@key1=123/key2=455 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from dp_no_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@dp_no_mm +PREHOOK: Input: default@dp_no_mm@key1=123/key2=0 +PREHOOK: Input: default@dp_no_mm@key1=123/key2=455 +#### A masked pattern was here #### +POSTHOOK: query: select * from dp_no_mm +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@dp_no_mm +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=0 +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=455 +#### A masked pattern was here #### +455 123 455 +455 123 455 +0 123 0 +0 123 0 +PREHOOK: query: select * from dp_mm PREHOOK: type: QUERY -PREHOOK: Input: default@simple_mm +PREHOOK: Input: default@dp_mm +PREHOOK: Input: default@dp_mm@key1=123/key2=0 +PREHOOK: Input: default@dp_mm@key1=123/key2=455 #### A masked pattern was here #### -POSTHOOK: query: select * from simple_mm +POSTHOOK: query: select * from dp_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@simple_mm +POSTHOOK: Input: default@dp_mm +POSTHOOK: Input: default@dp_mm@key1=123/key2=0 +POSTHOOK: Input: default@dp_mm@key1=123/key2=455 #### A masked pattern was here #### -0 -455 -0 -455 -PREHOOK: query: drop table part_mm +455 123 455 +455 123 455 +0 123 0 +0 123 0 +PREHOOK: query: drop table dp_no_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@part_mm -PREHOOK: Output: default@part_mm -POSTHOOK: query: drop table part_mm +PREHOOK: Input: default@dp_no_mm +PREHOOK: Output: default@dp_no_mm +POSTHOOK: query: drop table dp_no_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@part_mm -POSTHOOK: Output: default@part_mm -PREHOOK: query: drop table simple_mm +POSTHOOK: Input: default@dp_no_mm +POSTHOOK: Output: default@dp_no_mm +PREHOOK: query: drop table dp_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@simple_mm -PREHOOK: Output: default@simple_mm -POSTHOOK: query: drop table simple_mm +PREHOOK: Input: default@dp_mm +PREHOOK: Output: default@dp_mm +POSTHOOK: query: drop table dp_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@simple_mm -POSTHOOK: Output: default@simple_mm +POSTHOOK: Input: default@dp_mm +POSTHOOK: Output: default@dp_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From 0ce24b93e1ba92930c316dee0eb1262a27a101c8 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 4 Oct 2016 11:46:19 -0700 Subject: [PATCH 07/24] HIVE-14638 : handle unions (Sergey Shelukhin) --- .../hadoop/hive/common/HiveStatsUtils.java | 37 +- .../hadoop/hive/common/ValidWriteIds.java | 5 - .../hadoop/hive/ql/exec/FileSinkOperator.java | 137 +++++-- .../hadoop/hive/ql/io/HiveInputFormat.java | 46 ++- .../apache/hadoop/hive/ql/metadata/Hive.java | 3 + ql/src/test/queries/clientpositive/mm_all.q | 67 +++- .../test/queries/clientpositive/mm_current.q | 31 +- .../results/clientpositive/llap/mm_all.q.out | 377 ++++++++++++++++-- .../clientpositive/llap/mm_current.q.out | 186 +++++---- 9 files changed, 690 insertions(+), 199 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java index 111d99c143c1..745a868d3ff4 100644 --- a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.LinkedList; import java.util.List; import org.apache.hadoop.conf.Configuration; @@ -30,6 +31,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + /** * HiveStatsUtils. * A collection of utilities used for hive statistics. 
@@ -53,11 +56,17 @@ public class HiveStatsUtils {
    */
   public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs) throws IOException {
-    return getFileStatusRecurse(path, level, fs, FileUtils.HIDDEN_FILES_PATH_FILTER);
+    return getFileStatusRecurse(path, level, fs, FileUtils.HIDDEN_FILES_PATH_FILTER, false);
   }
 
   public static FileStatus[] getFileStatusRecurse(
       Path path, int level, FileSystem fs, PathFilter filter) throws IOException {
+    return getFileStatusRecurse(path, level, fs, filter, false);
+  }
+
+  public static FileStatus[] getFileStatusRecurse(
+      Path path, int level, FileSystem fs, PathFilter filter, boolean allLevelsBelow)
+      throws IOException {
 
     // if level is < 0, then return all files/directories under the specified path
     if (level < 0) {
@@ -81,7 +90,31 @@ public static FileStatus[] getFileStatusRecurse(
       sb.append(Path.SEPARATOR).append("*");
     }
     Path pathPattern = new Path(path, sb.toString());
-    return fs.globStatus(pathPattern, filter);
+    if (!allLevelsBelow) {
+      return fs.globStatus(pathPattern, filter);
+    }
+    LinkedList<FileStatus> queue = new LinkedList<>();
+    List<FileStatus> results = new ArrayList<>();
+    for (FileStatus status : fs.globStatus(pathPattern)) {
+      if (filter.accept(status.getPath())) {
+        results.add(status);
+      }
+      if (status.isDirectory()) {
+        queue.add(status);
+      }
+    }
+    while (!queue.isEmpty()) {
+      FileStatus status = queue.poll();
+      for (FileStatus child : fs.listStatus(status.getPath())) {
+        if (filter.accept(child.getPath())) {
+          results.add(child);
+        }
+        if (child.isDirectory()) {
+          queue.add(child);
+        }
+      }
+    }
+    return results.toArray(new FileStatus[results.size()]);
   }
 
   public static int getNumBitVectorsForNDVEstimation(Configuration conf) throws Exception {
diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
index b939b43247f8..160f4c079bab 100644
--- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
+++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java
@@ -116,11 +116,6 @@ public boolean isValid(long writeId) {
     return ids != null && (areIdsValid == ids.contains(writeId));
   }
 
-  public boolean isValidInput(Path file) {
-    Long writeId = extractWriteId(file);
-    return (writeId != null) && isValid(writeId);
-  }
-
   public static String getMmFilePrefix(long mmWriteId) {
     return MM_PREFIX + "_" + mmWriteId;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index f11a7c376c60..00115fe7a431 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -20,15 +20,12 @@
 import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.Serializable;
 import java.io.StringWriter;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -40,6 +37,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.HiveStatsUtils;
 import org.apache.hadoop.hive.common.StatsSetupConst;
@@ -96,6 +94,7 @@
 /**
  * File Sink operator implementation.
  **/
+@SuppressWarnings("deprecation")
 public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
     Serializable {
@@ -386,12 +385,16 @@ private void initializeSpecPath() {
     // 'Parent'
     if ((!conf.isLinkedFileSink()) || (dpCtx == null)) {
       specPath = conf.getDirName();
+      Utilities.LOG14535.info("Setting up FSOP " + System.identityHashCode(this) + " ("
+          + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath);
       childSpecPathDynLinkedPartitions = null;
       return;
     }
     specPath = conf.getParentDir();
     childSpecPathDynLinkedPartitions = conf.getDirName().getName();
+    Utilities.LOG14535.info("Setting up FSOP " + System.identityHashCode(this) + " ("
+        + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath);
   }
 
   /** Kryo ctor. */
@@ -1126,7 +1129,8 @@ public void closeOp(boolean abort) throws HiveException {
       }
     }
     if (!commitPaths.isEmpty()) {
-      Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(
+      Path manifestPath = getManifestDir(specPath, childSpecPathDynLinkedPartitions);
+      manifestPath = new Path(manifestPath, "_tmp." + ValidWriteIds.getMmFilePrefix(
           conf.getMmWriteId()) + "_" + taskId + MANIFEST_EXTENSION);
       Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths);
       try {
@@ -1161,6 +1165,10 @@ public void closeOp(boolean abort) throws HiveException {
     super.closeOp(abort);
   }
 
+  private static Path getManifestDir(Path specPath, String unionSuffix) {
+    return (unionSuffix == null) ? specPath : new Path(specPath, unionSuffix);
+  }
+
   /**
    * @return the name of the operator
    */
@@ -1179,15 +1187,17 @@ public void jobCloseOp(Configuration hconf, boolean success)
     try {
       if ((conf != null) && isNativeTable) {
         Path specPath = conf.getDirName();
+        String unionSuffix = null;
         DynamicPartitionCtx dpCtx = conf.getDynPartCtx();
         if (conf.isLinkedFileSink() && (dpCtx != null)) {
           specPath = conf.getParentDir();
           Utilities.LOG14535.info("Setting specPath to " + specPath + " for dynparts");
+          unionSuffix = conf.getDirName().getName();
         }
         if (!conf.isMmTable()) {
           Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); // TODO# other callers
         } else {
-          handleMmTable(specPath, hconf, success, dpCtx, conf, reporter);
+          handleMmTable(specPath, unionSuffix, hconf, success, dpCtx, conf, reporter);
         }
       }
     } catch (IOException e) {
@@ -1196,33 +1206,64 @@ public void jobCloseOp(Configuration hconf, boolean success)
     super.jobCloseOp(hconf, success);
   }
 
-  private void handleMmTable(Path specPath, Configuration hconf, boolean success,
-      DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter)
+  private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path,
+      int dpLevels, String unionSuffix, PathFilter filter) throws IOException {
+    StringBuilder sb = new StringBuilder(path.toUri().getPath());
+    for (int i = 0; i < dpLevels; i++) {
+      sb.append(Path.SEPARATOR).append("*");
+    }
+    if (unionSuffix != null) {
+      sb.append(Path.SEPARATOR).append(unionSuffix);
+    }
+    sb.append(Path.SEPARATOR).append("*"); // TODO: we could add exact mm prefix here
+    Path pathPattern = new Path(path, sb.toString());
+    return fs.globStatus(pathPattern, filter);
+  }
+
+  private void handleMmTable(Path specPath, String unionSuffix, Configuration hconf,
+      boolean success, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter)
       throws IOException, HiveException {
     FileSystem fs = specPath.getFileSystem(hconf);
     // Manifests would be at the root level, but the results at target level.
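     // I.e., each task writes its _tmp...manifest file directly under specPath (or under
     // specPath/unionSuffix for union queries), while the committed data files sit in the
     // write-id directories that getMmDirectoryCandidates globs for below.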
     // TODO# special case - doesn't take bucketing into account
-    int targetLevel = (dpCtx == null) ? 1 : (dpCtx.getNumDPCols() + 1);
-    int manifestLevel = 1;
+    Path manifestDir = getManifestDir(specPath, unionSuffix);
     ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true);
     if (!success) {
-      deleteMatchingFiles(specPath, fs, targetLevel, filter);
-      deleteMatchingFiles(specPath, fs, manifestLevel, filter);
+      tryDeleteAllMmFiles(fs, specPath, manifestDir, dpCtx, unionSuffix, filter);
       return;
     }
 
-    FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(specPath, manifestLevel, fs, filter);
-    List<Path> manifests = new ArrayList<>(files.length);
+    FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter);
+    Utilities.LOG14535.info("Looking for manifests in: " + manifestDir);
+    List<Path> manifests = new ArrayList<>();
     if (files != null) {
       for (FileStatus status : files) {
-        if (status.getPath().getName().endsWith(MANIFEST_EXTENSION)) {
-          manifests.add(status.getPath());
-        } else if (!status.isDirectory()) {
-          Path path = status.getPath();
-          Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
-          tryDelete(fs, path);
+        Path path = status.getPath();
+        if (path.getName().endsWith(MANIFEST_EXTENSION)) {
+          manifests.add(path);
         }
       }
     }
+
+    Utilities.LOG14535.info("Looking for files in: " + specPath);
+    files = getMmDirectoryCandidates(fs, specPath,
+        dpCtx == null ? 0 : dpCtx.getNumDPCols(), unionSuffix, filter);
+    ArrayList<FileStatus> results = new ArrayList<>();
+    if (files != null) {
+      for (FileStatus status : files) {
+        Path path = status.getPath();
+        Utilities.LOG14535.info("Looking at path: " + path + " from " + System.identityHashCode(this));
+        if (!status.isDirectory()) {
+          if (!path.getName().endsWith(MANIFEST_EXTENSION)) {
+            Utilities.LOG14535.warn("Unknown file found, deleting: " + path);
+            tryDelete(fs, path);
+          }
+        } else {
+          results.add(status);
+        }
+      }
+    }
+
     HashSet<String> committed = new HashSet<>();
     for (Path mfp : manifests) {
       try (FSDataInputStream mdis = fs.open(mfp)) {
@@ -1236,22 +1277,14 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
       }
     }
 
-    files = HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs, filter);
-    LinkedList<FileStatus> results = new LinkedList<>();
-    for (FileStatus status : files) {
-      if (!status.isDirectory()) {
-        Path path = status.getPath();
-        Utilities.LOG14535.warn("Unknown file found - neither a manifest nor directory: " + path);
-        tryDelete(fs, path);
-      } else {
-        for (FileStatus child : fs.listStatus(status.getPath())) {
-          Path path = child.getPath();
-          if (committed.remove(path.toString())) continue; // A good file.
-          Utilities.LOG14535.info("Deleting " + path + " that was not committed");
-          // We should actually succeed here - if we fail, don't commit the query.
-          if (!fs.delete(path, true)) {
-            throw new HiveException("Failed to delete an uncommitted path " + path);
-          }
+    for (FileStatus status : results) {
+      for (FileStatus child : fs.listStatus(status.getPath())) {
+        Path childPath = child.getPath();
+        if (committed.remove(childPath.toString())) continue; // A good file.
+        Utilities.LOG14535.info("Deleting " + childPath + " that was not committed");
+        // We should actually succeed here - if we fail, don't commit the query.
+        if (!fs.delete(childPath, true)) {
+          throw new HiveException("Failed to delete an uncommitted path " + childPath);
+        }
       }
     }
@@ -1263,11 +1296,19 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
       Utilities.LOG14535.info("Deleting manifest " + mfp);
       tryDelete(fs, mfp);
     }
+    // Delete the manifest directory if we only created it for manifests; otherwise the
+    // dynamic partition loader will find it and try to load it as a partition... what a mess.
+    if (manifestDir != specPath) {
+      FileStatus[] remainingFiles = fs.listStatus(manifestDir);
+      if (remainingFiles == null || remainingFiles.length == 0) {
+        Utilities.LOG14535.info("Deleting directory " + manifestDir);
+        tryDelete(fs, manifestDir);
+      }
+    }
 
     if (results.isEmpty()) return;
     FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]);
-    // TODO# dp may break - removeTempOrDuplicateFiles assumes dirs in results. Why? We recurse...
     List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
         fs, finalResults, dpCtx, conf, hconf);
     // create empty buckets if necessary
@@ -1276,15 +1317,27 @@ private void handleMmTable(Path specPath, Configuration hconf, boolean success,
     }
   }
 
-  private void deleteMatchingFiles(Path specPath, FileSystem fs,
-      int targetLevel, ValidWriteIds.IdPathFilter filter) throws IOException {
-    for (FileStatus status : HiveStatsUtils.getFileStatusRecurse(specPath, targetLevel, fs,
-        filter)) {
-      Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
-      tryDelete(fs, status.getPath());
+  private void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir,
+      DynamicPartitionCtx dpCtx, String unionSuffix,
+      ValidWriteIds.IdPathFilter filter) throws IOException {
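+    // On failure, remove both the data directories produced for this write ID and any
+    // manifests under manifestDir, so that a re-run does not pick up partial output.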
+    FileStatus[] files = getMmDirectoryCandidates(fs, specPath,
+        dpCtx == null ? 0 : dpCtx.getNumDPCols(), unionSuffix, filter);
+    if (files != null) {
+      for (FileStatus status : files) {
+        Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
+        tryDelete(fs, status.getPath());
+      }
+    }
+    files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter);
+    if (files != null) {
+      for (FileStatus status : files) {
+        Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
+        tryDelete(fs, status.getPath());
+      }
     }
   }
 
+
   private void tryDelete(FileSystem fs, Path path) {
     try {
       fs.delete(path, true);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
index 0510e08ba4a6..c3e2681f130f 100755
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java
@@ -29,6 +29,7 @@
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -42,6 +43,7 @@
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.ValidWriteIds;
@@ -352,7 +354,9 @@ private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, Job
       TableDesc table, Map<String, ValidWriteIds> writeIdMap, List<InputSplit> result)
           throws IOException {
     ValidWriteIds writeIds = extractWriteIds(writeIdMap, conf, table.getTableName());
-    Utilities.LOG14535.info("Observing " + table.getTableName() + ": " + writeIds);
+    if (writeIds != null) {
+      Utilities.LOG14535.info("Observing " + table.getTableName() + ": " + writeIds);
+    }
 
     Utilities.copyTablePropertiesToConf(table, conf);
 
@@ -394,22 +398,40 @@ private void addSplitsForGroup(List<Path> dirs, TableScanOperator tableScan, Job
 
   private void processForWriteIds(Path dir, JobConf conf,
       ValidWriteIds writeIds, List<Path> finalPaths) throws IOException {
-    FileStatus[] files = dir.getFileSystem(conf).listStatus(dir); // TODO: batch?
+    FileSystem fs = dir.getFileSystem(conf);
+    FileStatus[] files = fs.listStatus(dir); // TODO: batch?
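+    // Breadth-first walk of the directory tree: children without a write-id suffix are
+    // queued and listed in turn, while write-id directories are validated against the
+    // committed write IDs (see handleNonMmDirChild below).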
+    LinkedList<Path> subdirs = new LinkedList<>();
     for (FileStatus file : files) {
-      Path subdir = file.getPath();
-      if (!file.isDirectory()) {
-        Utilities.LOG14535.warn("Found a file not in subdirectory " + subdir);
-        continue;
-      }
-      if (!writeIds.isValidInput(subdir)) {
-        Utilities.LOG14535.warn("Ignoring an uncommitted directory " + subdir);
-        continue;
+      handleNonMmDirChild(file, writeIds, subdirs, finalPaths);
+    }
+    while (!subdirs.isEmpty()) {
+      Path subdir = subdirs.poll();
+      for (FileStatus file : fs.listStatus(subdir)) {
+        handleNonMmDirChild(file, writeIds, subdirs, finalPaths);
       }
-      Utilities.LOG14535.info("Adding input " + subdir);
-      finalPaths.add(subdir);
     }
   }
 
+  private void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds,
+      LinkedList<Path> subdirs, List<Path> finalPaths) {
+    Path path = file.getPath();
+    if (!file.isDirectory()) {
+      Utilities.LOG14535.warn("Ignoring a file not in MM directory " + path);
+      return;
+    }
+    Long writeId = ValidWriteIds.extractWriteId(path);
+    if (writeId == null) {
+      subdirs.add(path);
+      return;
+    }
+    if (!writeIds.isValid(writeId)) {
+      Utilities.LOG14535.warn("Ignoring an uncommitted directory " + path);
+      return;
+    }
+    Utilities.LOG14535.info("Adding input " + path);
+    finalPaths.add(path);
+  }
+
   Path[] getInputPaths(JobConf job) throws IOException {
     Path[] dirs;
     if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 6cd050018a41..73a3b1957300 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1597,6 +1597,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par
           getConf(), new ValidWriteIds.IdPathFilter(mmWriteId, false));
       }
     } else {
+      Utilities.LOG14535.info("moving " + loadPath + " to " + newPartPath);
       if (replace || (oldPart == null && !isAcid)) {
         replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(),
             isSrcLocal);
@@ -2016,6 +2017,7 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean
       newFiles = Collections.synchronizedList(new ArrayList<Path>());
     }
     if (mmWriteId == null) {
+      Utilities.LOG14535.info("moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
         Path tableDest = tbl.getPath();
         replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal);
@@ -2029,6 +2031,7 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean
         }
       }
     } else {
+      Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
         Path tableDest = tbl.getPath();
         deleteOldPathForReplace(tableDest, tableDest, sessionConf,
diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q
index 59171afdfb76..cc44c19333d5 100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@ -1,6 +1,10 @@
 set hive.mapred.mode=nonstrict;
 set hive.explain.user=false;
 set hive.fetch.task.conversion=none;
+set tez.grouping.min-size=1;
+set tez.grouping.max-size=2;
+set hive.exec.dynamic.partition.mode=nonstrict;
+
 -- Force multiple writers when reading
 drop table intermediate;
@@ -22,6 +26,8 @@ create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'
 insert into table simple_mm select key from intermediate;
 insert overwrite table simple_mm select key from intermediate;
 select * from simple_mm;
+insert into table
simple_mm select key from intermediate; +select * from simple_mm; drop table simple_mm; @@ -50,6 +56,65 @@ drop table dp_no_mm; drop table dp_mm; +-- union + +create table union_mm(id int) tblproperties ('hivecommit'='true'); +insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps; + +select * from union_mm order by id; + +insert into table union_mm +select p from +( +select key + 1 as p from intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate; + +select * from union_mm order by id; + +insert into table union_mm +SELECT p FROM +( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p; + + +select * from union_mm order by id; +drop table union_mm; + + +create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true'); +insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps; + +select * from partunion_mm; +drop table partunion_mm; + +-- TODO# from here, fix it + + + -- future @@ -110,7 +175,7 @@ drop table dp_mm; --INSERT OVERWRITE TABLE skew_mm --SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ----- TODO load, acid, etc +---- TODO load, multi-insert etc -- -- diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index b551176e990b..e1fb3d9be773 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -12,27 +12,28 @@ insert into table intermediate partition(p='455') select key from src limit 2; insert into table intermediate partition(p='456') select key from src limit 2; +create table partunion_no_mm(id int) partitioned by (key int); +insert into table partunion_no_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps; -drop table dp_no_mm; -drop table dp_mm; +select * from partunion_no_mm; +drop table partunion_no_mm; -set hive.merge.mapredfiles=false; -set hive.merge.sparkfiles=false; -set hive.merge.tezfiles=false; -create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc; -create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc - tblproperties ('hivecommit'='true'); +create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true'); +insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps; -insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate; +select * from partunion_mm; +drop table partunion_mm; -insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; -select * from dp_no_mm; -select * from dp_mm; - -drop table dp_no_mm; -drop table dp_mm; drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index b0c9c0aebaaf..0a8bb40dbeb0 100644 --- 
a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -147,18 +147,18 @@ POSTHOOK: Input: default@part_mm POSTHOOK: Input: default@part_mm@key_mm=455 POSTHOOK: Input: default@part_mm@key_mm=456 #### A masked pattern was here #### -238 455 -86 455 -238 455 -86 455 -238 455 -86 455 -238 455 -86 455 -238 456 -86 456 -238 456 -86 456 +0 455 +455 455 +0 455 +455 455 +0 455 +455 455 +0 455 +455 455 +0 456 +455 456 +0 456 +455 456 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -213,10 +213,39 @@ POSTHOOK: query: select * from simple_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@simple_mm #### A masked pattern was here #### -238 -86 -238 -86 +0 +455 +0 +455 +PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from simple_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +0 +455 +0 +455 +0 +455 +0 +455 PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@simple_mm @@ -264,10 +293,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=238 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=86 -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=238).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=86).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=0 +POSTHOOK: Output: default@dp_no_mm@key1=123/key2=455 +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate @@ -279,42 +308,42 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@dp_mm@key1=123/key2=238 -POSTHOOK: Output: default@dp_mm@key1=123/key2=86 -POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=238).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=86).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Output: default@dp_mm@key1=123/key2=0 +POSTHOOK: 
Output: default@dp_mm@key1=123/key2=455 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from dp_no_mm PREHOOK: type: QUERY PREHOOK: Input: default@dp_no_mm -PREHOOK: Input: default@dp_no_mm@key1=123/key2=238 -PREHOOK: Input: default@dp_no_mm@key1=123/key2=86 +PREHOOK: Input: default@dp_no_mm@key1=123/key2=0 +PREHOOK: Input: default@dp_no_mm@key1=123/key2=455 #### A masked pattern was here #### POSTHOOK: query: select * from dp_no_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@dp_no_mm -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=238 -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=86 +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=0 +POSTHOOK: Input: default@dp_no_mm@key1=123/key2=455 #### A masked pattern was here #### -238 123 238 -238 123 238 -86 123 86 -86 123 86 +455 123 455 +455 123 455 +0 123 0 +0 123 0 PREHOOK: query: select * from dp_mm PREHOOK: type: QUERY PREHOOK: Input: default@dp_mm -PREHOOK: Input: default@dp_mm@key1=123/key2=238 -PREHOOK: Input: default@dp_mm@key1=123/key2=86 +PREHOOK: Input: default@dp_mm@key1=123/key2=0 +PREHOOK: Input: default@dp_mm@key1=123/key2=455 #### A masked pattern was here #### POSTHOOK: query: select * from dp_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@dp_mm -POSTHOOK: Input: default@dp_mm@key1=123/key2=238 -POSTHOOK: Input: default@dp_mm@key1=123/key2=86 +POSTHOOK: Input: default@dp_mm@key1=123/key2=0 +POSTHOOK: Input: default@dp_mm@key1=123/key2=455 #### A masked pattern was here #### -238 123 238 -238 123 238 -86 123 86 -86 123 86 +455 123 455 +455 123 455 +0 123 0 +0 123 0 PREHOOK: query: drop table dp_no_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@dp_no_mm @@ -331,7 +360,262 @@ POSTHOOK: query: drop table dp_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@dp_mm POSTHOOK: Output: default@dp_mm -PREHOOK: query: -- future +PREHOOK: query: -- union + +create table union_mm(id int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_mm +POSTHOOK: query: -- union + +create table union_mm(id int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_mm +PREHOOK: query: insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A 
masked pattern was here #### +0 +0 +1 +1 +455 +455 +456 +456 +PREHOOK: query: insert into table union_mm +select p from +( +select key + 1 as p from intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +select p from +( +select key + 1 as p from intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A masked pattern was here #### +0 +0 +0 +1 +1 +1 +2 +2 +455 +455 +455 +456 +456 +456 +457 +457 +PREHOOK: query: insert into table union_mm +SELECT p FROM +( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +SELECT p FROM +( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A masked pattern was here #### +0 +0 +0 +0 +1 +1 +1 +1 +2 +2 +2 +455 +455 +455 +455 +456 +456 +456 +456 +457 +457 +457 +PREHOOK: query: drop table union_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@union_mm +PREHOOK: Output: default@union_mm +POSTHOOK: query: drop table union_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@union_mm +POSTHOOK: Output: default@union_mm +PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partunion_mm +PREHOOK: 
query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@partunion_mm@key=0 +POSTHOOK: Output: default@partunion_mm@key=1 +POSTHOOK: Output: default@partunion_mm@key=455 +POSTHOOK: Output: default@partunion_mm@key=456 +POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from partunion_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@partunion_mm +PREHOOK: Input: default@partunion_mm@key=0 +PREHOOK: Input: default@partunion_mm@key=1 +PREHOOK: Input: default@partunion_mm@key=455 +PREHOOK: Input: default@partunion_mm@key=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from partunion_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Input: default@partunion_mm@key=0 +POSTHOOK: Input: default@partunion_mm@key=1 +POSTHOOK: Input: default@partunion_mm@key=455 +POSTHOOK: Input: default@partunion_mm@key=456 +#### A masked pattern was here #### +0 0 +0 0 +1 1 +1 1 +455 455 +455 455 +456 456 +456 456 +PREHOOK: query: drop table partunion_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@partunion_mm +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: drop table partunion_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Output: default@partunion_mm +PREHOOK: query: -- TODO# from here, fix it + + + + +-- future @@ -390,7 +674,7 @@ PREHOOK: query: -- future --INSERT OVERWRITE TABLE skew_mm --SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ----- TODO load, acid, etc +---- TODO load, multi-insert etc -- -- @@ -398,7 +682,12 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- future +POSTHOOK: query: -- TODO# from here, fix it + + + + +-- future @@ -457,7 +746,7 @@ POSTHOOK: query: -- future --INSERT OVERWRITE TABLE skew_mm --SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ----- TODO load, acid, etc +---- TODO load, multi-insert etc -- -- diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index fe1caee92507..87214ba55d60 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ 
b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,110 +28,140 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: drop table dp_no_mm -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table dp_no_mm -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table dp_mm -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table dp_mm -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@dp_no_mm -POSTHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dp_no_mm -PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc - tblproperties ('hivecommit'='true') +PREHOOK: query: create table partunion_no_mm(id int) partitioned by (key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@dp_mm -POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc - tblproperties ('hivecommit'='true') +PREHOOK: Output: default@partunion_no_mm +POSTHOOK: query: create table partunion_no_mm(id int) partitioned by (key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@dp_mm -PREHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: Output: default@partunion_no_mm +PREHOOK: query: insert into table partunion_no_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@dp_no_mm@key1=123 -POSTHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: Output: default@partunion_no_mm +POSTHOOK: query: insert into table partunion_no_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=0 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=455 -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: Output: default@partunion_no_mm@key=0 +POSTHOOK: Output: default@partunion_no_mm@key=1 +POSTHOOK: Output: default@partunion_no_mm@key=455 +POSTHOOK: Output: default@partunion_no_mm@key=456 +POSTHOOK: Lineage: partunion_no_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_no_mm PARTITION(key=1).id 
EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_no_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_no_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from partunion_no_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@partunion_no_mm +PREHOOK: Input: default@partunion_no_mm@key=0 +PREHOOK: Input: default@partunion_no_mm@key=1 +PREHOOK: Input: default@partunion_no_mm@key=455 +PREHOOK: Input: default@partunion_no_mm@key=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from partunion_no_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partunion_no_mm +POSTHOOK: Input: default@partunion_no_mm@key=0 +POSTHOOK: Input: default@partunion_no_mm@key=1 +POSTHOOK: Input: default@partunion_no_mm@key=455 +POSTHOOK: Input: default@partunion_no_mm@key=456 +#### A masked pattern was here #### +0 0 +0 0 +1 1 +1 1 +455 455 +455 455 +456 456 +456 456 +PREHOOK: query: drop table partunion_no_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@partunion_no_mm +PREHOOK: Output: default@partunion_no_mm +POSTHOOK: query: drop table partunion_no_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@partunion_no_mm +POSTHOOK: Output: default@partunion_no_mm +PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partunion_mm +PREHOOK: query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@dp_mm@key1=123 -POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@dp_mm@key1=123/key2=0 -POSTHOOK: Output: default@dp_mm@key1=123/key2=455 -POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from dp_no_mm +POSTHOOK: Output: default@partunion_mm@key=0 +POSTHOOK: Output: default@partunion_mm@key=1 +POSTHOOK: Output: default@partunion_mm@key=455 +POSTHOOK: Output: default@partunion_mm@key=456 +POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from partunion_mm PREHOOK: type: QUERY -PREHOOK: Input: default@dp_no_mm -PREHOOK: Input: default@dp_no_mm@key1=123/key2=0 -PREHOOK: Input: default@dp_no_mm@key1=123/key2=455 +PREHOOK: Input: default@partunion_mm +PREHOOK: Input: default@partunion_mm@key=0 +PREHOOK: Input: default@partunion_mm@key=1 +PREHOOK: Input: default@partunion_mm@key=455 +PREHOOK: Input: default@partunion_mm@key=456 #### A masked pattern was here #### -POSTHOOK: query: select * from dp_no_mm +POSTHOOK: query: select * from partunion_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@dp_no_mm -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=0 -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=455 +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Input: default@partunion_mm@key=0 +POSTHOOK: Input: default@partunion_mm@key=1 +POSTHOOK: Input: default@partunion_mm@key=455 +POSTHOOK: Input: default@partunion_mm@key=456 #### A masked pattern was here #### -455 123 455 -455 123 455 -0 123 0 -0 123 0 -PREHOOK: query: select * from dp_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@dp_mm -PREHOOK: Input: default@dp_mm@key1=123/key2=0 -PREHOOK: Input: default@dp_mm@key1=123/key2=455 -#### A masked pattern was here #### -POSTHOOK: query: select * from dp_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dp_mm -POSTHOOK: Input: default@dp_mm@key1=123/key2=0 -POSTHOOK: Input: default@dp_mm@key1=123/key2=455 -#### A masked pattern was here #### -455 123 455 -455 123 455 -0 123 0 -0 123 0 -PREHOOK: query: drop table dp_no_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dp_no_mm -PREHOOK: Output: default@dp_no_mm -POSTHOOK: query: drop table dp_no_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dp_no_mm -POSTHOOK: Output: default@dp_no_mm -PREHOOK: query: drop table dp_mm +0 0 +0 0 +1 1 +1 1 +455 455 +455 455 +456 456 +456 456 +PREHOOK: query: drop table partunion_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dp_mm -PREHOOK: Output: default@dp_mm -POSTHOOK: query: drop table dp_mm +PREHOOK: Input: default@partunion_mm +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: drop table partunion_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dp_mm -POSTHOOK: Output: default@dp_mm +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Output: default@partunion_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From b9e815722686f5bcdae40caadae34f84509d86c2 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 11 Oct 2016 16:09:28 -0700 Subject: [PATCH 08/24] HIVE-14639 : handle SKEWED BY for MM tables (Sergey Shelukhin) --- .../hadoop/hive/ql/exec/FileSinkOperator.java | 31 +- .../apache/hadoop/hive/ql/exec/Utilities.java | 2 +- .../hadoop/hive/ql/io/HiveInputFormat.java | 2 + .../apache/hadoop/hive/ql/metadata/Hive.java | 6 +- ql/src/test/queries/clientpositive/mm_all.q | 76 ++- .../test/queries/clientpositive/mm_current.q | 45 +- .../results/clientpositive/llap/mm_all.q.out | 473 ++++++++++-------- .../clientpositive/llap/mm_current.q.out | 215 ++++---- 8 files changed, 473 insertions(+), 377 deletions(-) diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 00115fe7a431..5902036705e0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -1189,6 +1189,7 @@ public void jobCloseOp(Configuration hconf, boolean success) Path specPath = conf.getDirName(); String unionSuffix = null; DynamicPartitionCtx dpCtx = conf.getDynPartCtx(); + ListBucketingCtx lbCtx = conf.getLbCtx(); if (conf.isLinkedFileSink() && (dpCtx != null)) { specPath = conf.getParentDir(); Utilities.LOG14535.info("Setting specPath to " + specPath + " for dynparts"); @@ -1197,7 +1198,7 @@ public void jobCloseOp(Configuration hconf, boolean success) if (!conf.isMmTable()) { Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); // TODO# other callers } else { - handleMmTable(specPath, unionSuffix, hconf, success, dpCtx, conf, reporter); + handleMmTable(specPath, unionSuffix, hconf, success, dpCtx, lbCtx, conf, reporter); } } } catch (IOException e) { @@ -1207,22 +1208,26 @@ public void jobCloseOp(Configuration hconf, boolean success) } private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, - int dpLevels, String unionSuffix, PathFilter filter) throws IOException { + DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, String unionSuffix, PathFilter filter) + throws IOException { StringBuilder sb = new StringBuilder(path.toUri().getPath()); - for (int i = 0; i < dpLevels; i++) { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + lbLevels = lbCtx == null ? 0 : lbCtx.getSkewedColNames().size(); + for (int i = 0; i < dpLevels + lbLevels; i++) { sb.append(Path.SEPARATOR).append("*"); } if (unionSuffix != null) { sb.append(Path.SEPARATOR).append(unionSuffix); } sb.append(Path.SEPARATOR).append("*"); // TODO: we could add exact mm prefix here + Utilities.LOG14535.info("Looking for files via: " + sb.toString()); Path pathPattern = new Path(path, sb.toString()); return fs.globStatus(pathPattern, filter); } private void handleMmTable(Path specPath, String unionSuffix, Configuration hconf, - boolean success, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter) - throws IOException, HiveException { + boolean success, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, FileSinkDesc conf, + Reporter reporter) throws IOException, HiveException { FileSystem fs = specPath.getFileSystem(hconf); // Manifests would be at the root level, but the results at target level. 
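For illustration, a minimal standalone sketch of the glob construction in getMmDirectoryCandidates above, assuming a hypothetical table root /warehouse/t, two dynamic-partition levels, one list-bucketing level, and no union suffix; all names and counts here are illustrative, not taken from the patch:

import org.apache.hadoop.fs.Path;

public class MmGlobSketch {
  public static void main(String[] args) {
    int dpLevels = 2, lbLevels = 1; // hypothetical level counts
    String unionSuffix = null;      // set only for union queries (a union subdir name)
    StringBuilder sb = new StringBuilder("/warehouse/t");
    for (int i = 0; i < dpLevels + lbLevels; i++) {
      sb.append(Path.SEPARATOR).append("*"); // one wildcard per DP/LB directory level
    }
    if (unionSuffix != null) {
      sb.append(Path.SEPARATOR).append(unionSuffix);
    }
    sb.append(Path.SEPARATOR).append("*"); // the mm write-id directory itself
    System.out.println(sb); // prints /warehouse/t/*/*/*/*
  }
}

The candidate search is purely name-based; the IdPathFilter passed to globStatus then keeps only entries matching the query's mm write ID.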
// TODO# special case - doesn't take bucketing into account @@ -1230,7 +1235,7 @@ private void handleMmTable(Path specPath, String unionSuffix, Configuration hcon ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true); if (!success) { - tryDeleteAllMmFiles(fs, specPath, manifestDir, dpCtx, unionSuffix, filter); + tryDeleteAllMmFiles(fs, specPath, manifestDir, dpCtx, lbCtx, unionSuffix, filter); return; } FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); @@ -1240,14 +1245,14 @@ private void handleMmTable(Path specPath, String unionSuffix, Configuration hcon for (FileStatus status : files) { Path path = status.getPath(); if (path.getName().endsWith(MANIFEST_EXTENSION)) { + Utilities.LOG14535.info("Reading manifest " + path); manifests.add(path); } } } Utilities.LOG14535.info("Looking for files in: " + specPath); - files = getMmDirectoryCandidates(fs, specPath, - dpCtx == null ? 0 : dpCtx.getNumDPCols(), unionSuffix, filter); + files = getMmDirectoryCandidates(fs, specPath, dpCtx, lbCtx, unionSuffix, filter); ArrayList results = new ArrayList<>(); if (files != null) { for (FileStatus status : files) { @@ -1307,8 +1312,11 @@ private void handleMmTable(Path specPath, String unionSuffix, Configuration hcon } if (results.isEmpty()) return; - FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]); + // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing, + // so maintain parity here by not calling it at all. + if (lbCtx != null) return; + FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]); List emptyBuckets = Utilities.removeTempOrDuplicateFiles( fs, finalResults, dpCtx, conf, hconf); // create empty buckets if necessary @@ -1318,10 +1326,9 @@ private void handleMmTable(Path specPath, String unionSuffix, Configuration hcon } private void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, - DynamicPartitionCtx dpCtx, String unionSuffix, + DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, String unionSuffix, ValidWriteIds.IdPathFilter filter) throws IOException { - FileStatus[] files = getMmDirectoryCandidates(fs, specPath, - dpCtx == null ? 
0 : dpCtx.getNumDPCols(), unionSuffix, filter); + FileStatus[] files = getMmDirectoryCandidates(fs, specPath, dpCtx, lbCtx, unionSuffix, filter); if (files != null) { for (FileStatus status : files) { Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 03abdc186403..a2eff8bad8b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -1599,7 +1599,7 @@ public static HashMap removeTempOrDuplicateFiles(FileStatus[ } } else { String taskId = getPrefixedTaskIdFromFilename(one.getPath().getName()); - Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId/*, new Exception()*/); + Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId); FileStatus otherFile = taskIdToFile.get(taskId); if (otherFile == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index c3e2681f130f..a539799e1262 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -399,6 +399,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job private void processForWriteIds(Path dir, JobConf conf, ValidWriteIds writeIds, List finalPaths) throws IOException { FileSystem fs = dir.getFileSystem(conf); + Utilities.LOG14535.warn("Checking " + dir + " (root) for inputs"); FileStatus[] files = fs.listStatus(dir); // TODO: batch? LinkedList subdirs = new LinkedList<>(); for (FileStatus file : files) { @@ -415,6 +416,7 @@ private void processForWriteIds(Path dir, JobConf conf, private void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds, LinkedList subdirs, List finalPaths) { Path path = file.getPath(); + Utilities.LOG14535.warn("Checking " + path + " for inputs"); if (!file.isDirectory()) { Utilities.LOG14535.warn("Ignoring a file not in MM directory " + path); return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 73a3b1957300..84c1e7bce6d7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1587,12 +1587,12 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par List newFiles = null; if (mmWriteId != null) { Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath); - assert !isAcid && !replace; + assert !isAcid; if (areEventsForDmlNeeded(tbl, oldPart)) { newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } - if (replace) { - Path tableDest = tbl.getPath(); + Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace"); + if (replace && oldPartPath != null) { // TODO# is this correct? 
ignore until iow jira deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), new ValidWriteIds.IdPathFilter(mmWriteId, false)); } diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index cc44c19333d5..1f85c483de1b 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -9,8 +9,8 @@ set hive.exec.dynamic.partition.mode=nonstrict; -- Force multiple writers when reading drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select key from src limit 2; -insert into table intermediate partition(p='456') select key from src limit 2; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; drop table part_mm; create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); @@ -18,21 +18,20 @@ explain insert into table part_mm partition(key_mm='455') select key from interm insert into table part_mm partition(key_mm='455') select key from intermediate; insert into table part_mm partition(key_mm='456') select key from intermediate; insert into table part_mm partition(key_mm='455') select key from intermediate; -select * from part_mm; +select * from part_mm order by key; drop table part_mm; drop table simple_mm; create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); insert into table simple_mm select key from intermediate; insert overwrite table simple_mm select key from intermediate; -select * from simple_mm; +select * from simple_mm order by key; insert into table simple_mm select key from intermediate; -select * from simple_mm; +select * from simple_mm order by key; drop table simple_mm; --- simple DP (no bucketing, no sorting?) 
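A note on the intent behind the intermediate-load change above: a bare "limit 2" without an ordering is free to return any two rows, so golden-file output can differ between runs; adding distinct and order by pins the row set before the limit applies. A sketch of the pattern, using a hypothetical table t:

-- Nondeterministic: any two rows of src may be chosen, in any order.
-- insert into table t partition(p='1') select key from src limit 2;

-- Deterministic: the selected rows are fully pinned before the limit.
insert into table t partition(p='1')
select distinct key from src where key >= 0 order by key desc limit 2;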
-drop table dp_no_mm; +-- simple DP (no bucketing) drop table dp_mm; set hive.exec.dynamic.partition.mode=nonstrict; @@ -41,18 +40,13 @@ set hive.merge.mapredfiles=false; set hive.merge.sparkfiles=false; set hive.merge.tezfiles=false; -create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc; create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc tblproperties ('hivecommit'='true'); -insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate; - insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; -select * from dp_no_mm; -select * from dp_mm; +select * from dp_mm order by key; -drop table dp_no_mm; drop table dp_mm; @@ -108,10 +102,34 @@ select key as p, key from intermediate union all select key + 1 as p, key + 1 from intermediate ) temps; -select * from partunion_mm; +select * from partunion_mm order by id; drop table partunion_mm; --- TODO# from here, fix it + + +create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true'); + +insert into table skew_mm +select key, key, key from intermediate; + +select * from skew_mm order by k2; +drop table skew_mm; + + +create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true'); + +insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate; + + +select * from skew_dp_union_mm order by k2; +drop table skew_dp_union_mm; + + @@ -122,24 +140,12 @@ drop table partunion_mm; ---drop table partunion_mm; --drop table merge_mm; --drop table ctas_mm; ---drop table T1; ---drop table T2; ---drop table skew_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- ---create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); --- --- ---insert into table partunion_mm partition(key_mm) ---select temps.* from ( ---select key as key_mm, key from ctas_mm ---union all ---select key as key_mm, key from simple_mm ) temps; -- --set hive.merge.mapredfiles=true; --set hive.merge.sparkfiles=true; @@ -158,22 +164,6 @@ drop table partunion_mm; -- FROM src; -- -- ---set hive.optimize.skewjoin.compiletime = true; ----- the test case is wrong? 
--- ---CREATE TABLE T1(key STRING, val STRING) ---SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; ---CREATE TABLE T2(key STRING, val STRING) ---SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; --- ---EXPLAIN ---SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; --- ---create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); ---INSERT OVERWRITE TABLE skew_mm ---SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ---- TODO load, multi-insert etc -- diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index e1fb3d9be773..ceb7a1ae26d4 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -12,26 +12,41 @@ insert into table intermediate partition(p='455') select key from src limit 2; insert into table intermediate partition(p='456') select key from src limit 2; -create table partunion_no_mm(id int) partitioned by (key int); -insert into table partunion_no_mm partition(key) -select temps.* from ( -select key as p, key from intermediate -union all -select key + 1 as p, key + 1 from intermediate ) temps; +set hive.optimize.skewjoin.compiletime = true; + +create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='false'); + +insert into table skew_mm +select key, key, key from intermediate; + +drop table skew_mm; + + +create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true'); -select * from partunion_no_mm; -drop table partunion_no_mm; +insert into table skew_mm +select key, key, key from intermediate; +select * from skew_mm; +drop table skew_mm; -create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true'); -insert into table partunion_mm partition(key) -select temps.* from ( -select key as p, key from intermediate + + + + +create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true'); + +insert into table skew_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate union all -select key + 1 as p, key + 1 from intermediate ) temps; +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate; + -select * from partunion_mm; -drop table partunion_mm; +select * from skew_mm; +drop table skew_mm; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 0a8bb40dbeb0..b70ae3c592bf 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -12,20 +12,20 @@ POSTHOOK: query: create table intermediate(key int) partitioned by (p int) store POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@intermediate -PREHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: 
default@intermediate@p=455 -POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=455 POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@intermediate@p=456 -POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 @@ -135,30 +135,30 @@ POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@part_mm@key_mm=455 POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from part_mm +PREHOOK: query: select * from part_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@part_mm PREHOOK: Input: default@part_mm@key_mm=455 PREHOOK: Input: default@part_mm@key_mm=456 #### A masked pattern was here #### -POSTHOOK: query: select * from part_mm +POSTHOOK: query: select * from part_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@part_mm POSTHOOK: Input: default@part_mm@key_mm=455 POSTHOOK: Input: default@part_mm@key_mm=456 #### A masked pattern was here #### 0 455 -455 455 0 455 -455 455 -0 455 -455 455 -0 455 -455 455 0 456 -455 456 -0 456 -455 456 +10 456 +10 455 +10 455 +97 456 +97 455 +97 455 +98 455 +98 455 +98 456 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -205,18 +205,18 @@ POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from simple_mm +PREHOOK: query: select * from simple_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@simple_mm #### A masked pattern was here #### -POSTHOOK: query: select * from simple_mm +POSTHOOK: query: select * from simple_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@simple_mm #### A masked pattern was here #### 0 -455 -0 -455 +10 +97 +98 PREHOOK: query: insert into table simple_mm select key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate @@ -230,22 +230,22 @@ POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from simple_mm +PREHOOK: query: select * from simple_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@simple_mm #### A masked pattern was here #### -POSTHOOK: query: select * from simple_mm +POSTHOOK: query: select * 
from simple_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@simple_mm #### A masked pattern was here #### 0 -455 0 -455 -0 -455 -0 -455 +10 +10 +97 +97 +98 +98 PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@simple_mm @@ -254,24 +254,12 @@ POSTHOOK: query: drop table simple_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@simple_mm POSTHOOK: Output: default@simple_mm -PREHOOK: query: -- simple DP (no bucketing, no sorting?) -drop table dp_no_mm +PREHOOK: query: -- simple DP (no bucketing) +drop table dp_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: -- simple DP (no bucketing, no sorting?) -drop table dp_no_mm +POSTHOOK: query: -- simple DP (no bucketing) +drop table dp_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table dp_mm -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table dp_mm -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@dp_no_mm -POSTHOOK: query: create table dp_no_mm (key int) partitioned by (key1 string, key2 int) stored as orc -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@dp_no_mm PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc tblproperties ('hivecommit'='true') PREHOOK: type: CREATETABLE @@ -282,21 +270,6 @@ POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dp_mm -PREHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@dp_no_mm@key1=123 -POSTHOOK: query: insert into table dp_no_mm partition (key1='123', key2) select key, key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=0 -POSTHOOK: Output: default@dp_no_mm@key1=123/key2=455 -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_no_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate @@ -309,49 +282,33 @@ POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@dp_mm@key1=123/key2=0 -POSTHOOK: Output: default@dp_mm@key1=123/key2=455 +POSTHOOK: Output: default@dp_mm@key1=123/key2=10 +POSTHOOK: Output: default@dp_mm@key1=123/key2=97 +POSTHOOK: Output: default@dp_mm@key1=123/key2=98 POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from dp_no_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@dp_no_mm -PREHOOK: 
Input: default@dp_no_mm@key1=123/key2=0 -PREHOOK: Input: default@dp_no_mm@key1=123/key2=455 -#### A masked pattern was here #### -POSTHOOK: query: select * from dp_no_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@dp_no_mm -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=0 -POSTHOOK: Input: default@dp_no_mm@key1=123/key2=455 -#### A masked pattern was here #### -455 123 455 -455 123 455 -0 123 0 -0 123 0 -PREHOOK: query: select * from dp_mm +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from dp_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@dp_mm PREHOOK: Input: default@dp_mm@key1=123/key2=0 -PREHOOK: Input: default@dp_mm@key1=123/key2=455 +PREHOOK: Input: default@dp_mm@key1=123/key2=10 +PREHOOK: Input: default@dp_mm@key1=123/key2=97 +PREHOOK: Input: default@dp_mm@key1=123/key2=98 #### A masked pattern was here #### -POSTHOOK: query: select * from dp_mm +POSTHOOK: query: select * from dp_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@dp_mm POSTHOOK: Input: default@dp_mm@key1=123/key2=0 -POSTHOOK: Input: default@dp_mm@key1=123/key2=455 +POSTHOOK: Input: default@dp_mm@key1=123/key2=10 +POSTHOOK: Input: default@dp_mm@key1=123/key2=97 +POSTHOOK: Input: default@dp_mm@key1=123/key2=98 #### A masked pattern was here #### -455 123 455 -455 123 455 -0 123 0 0 123 0 -PREHOOK: query: drop table dp_no_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@dp_no_mm -PREHOOK: Output: default@dp_no_mm -POSTHOOK: query: drop table dp_no_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@dp_no_mm -POSTHOOK: Output: default@dp_no_mm +10 123 10 +97 123 97 +98 123 98 PREHOOK: query: drop table dp_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@dp_mm @@ -402,13 +359,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@union_mm #### A masked pattern was here #### 0 -0 1 -1 -455 -455 -456 -456 +10 +11 +97 +98 +98 +99 PREHOOK: query: insert into table union_mm select p from ( @@ -448,20 +405,23 @@ POSTHOOK: Input: default@union_mm #### A masked pattern was here #### 0 0 -0 -1 1 1 2 -2 -455 -455 -455 -456 -456 -456 -457 -457 +10 +10 +11 +11 +12 +97 +97 +98 +98 +98 +99 +99 +99 +100 PREHOOK: query: insert into table union_mm SELECT p FROM ( @@ -516,25 +476,32 @@ POSTHOOK: Input: default@union_mm 0 0 0 -0 -1 1 1 1 2 2 -2 -455 -455 -455 -455 -456 -456 -456 -456 -457 -457 -457 +10 +10 +10 +11 +11 +11 +12 +12 +97 +97 +97 +98 +98 +98 +98 +99 +99 +99 +99 +100 +100 PREHOOK: query: drop table union_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@union_mm @@ -572,36 +539,48 @@ POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Output: default@partunion_mm@key=0 POSTHOOK: Output: default@partunion_mm@key=1 -POSTHOOK: Output: default@partunion_mm@key=455 -POSTHOOK: Output: default@partunion_mm@key=456 +POSTHOOK: Output: default@partunion_mm@key=10 +POSTHOOK: Output: default@partunion_mm@key=11 +POSTHOOK: Output: default@partunion_mm@key=97 +POSTHOOK: Output: default@partunion_mm@key=98 +POSTHOOK: Output: default@partunion_mm@key=99 POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from partunion_mm +POSTHOOK: Lineage: partunion_mm PARTITION(key=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from partunion_mm order by id PREHOOK: type: QUERY PREHOOK: Input: default@partunion_mm PREHOOK: Input: default@partunion_mm@key=0 PREHOOK: Input: default@partunion_mm@key=1 -PREHOOK: Input: default@partunion_mm@key=455 -PREHOOK: Input: default@partunion_mm@key=456 +PREHOOK: Input: default@partunion_mm@key=10 +PREHOOK: Input: default@partunion_mm@key=11 +PREHOOK: Input: default@partunion_mm@key=97 +PREHOOK: Input: default@partunion_mm@key=98 +PREHOOK: Input: default@partunion_mm@key=99 #### A masked pattern was here #### -POSTHOOK: query: select * from partunion_mm +POSTHOOK: query: select * from partunion_mm order by id POSTHOOK: type: QUERY POSTHOOK: Input: default@partunion_mm POSTHOOK: Input: default@partunion_mm@key=0 POSTHOOK: Input: default@partunion_mm@key=1 -POSTHOOK: Input: default@partunion_mm@key=455 -POSTHOOK: Input: default@partunion_mm@key=456 +POSTHOOK: Input: default@partunion_mm@key=10 +POSTHOOK: Input: default@partunion_mm@key=11 +POSTHOOK: Input: default@partunion_mm@key=97 +POSTHOOK: Input: default@partunion_mm@key=98 +POSTHOOK: Input: default@partunion_mm@key=99 #### A masked pattern was here #### 0 0 -0 0 -1 1 1 1 -455 455 -455 455 -456 456 -456 456 +10 10 +11 11 +97 97 +98 98 +98 98 +99 99 PREHOOK: query: drop table partunion_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@partunion_mm @@ -610,35 +589,164 @@ POSTHOOK: query: drop table partunion_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@partunion_mm POSTHOOK: Output: default@partunion_mm -PREHOOK: query: -- TODO# from here, fix it - - - - --- future +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@skew_mm +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@skew_mm +PREHOOK: query: insert into table skew_mm +select key, key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: 
default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@skew_mm +POSTHOOK: query: insert into table skew_mm +select key, key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@skew_mm +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_mm order by k2 +PREHOOK: type: QUERY +PREHOOK: Input: default@skew_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from skew_mm order by k2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@skew_mm +#### A masked pattern was here #### +0 0 0 +10 10 10 +97 97 97 +98 98 98 +PREHOOK: query: drop table skew_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@skew_mm +PREHOOK: Output: default@skew_mm +POSTHOOK: query: drop table skew_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@skew_mm +POSTHOOK: Output: default@skew_mm +PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@skew_dp_union_mm +PREHOOK: query: insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@skew_dp_union_mm@k3=0 +POSTHOOK: Output: default@skew_dp_union_mm@k3=10 +POSTHOOK: Output: default@skew_dp_union_mm@k3=101 +POSTHOOK: Output: default@skew_dp_union_mm@k3=102 +POSTHOOK: Output: default@skew_dp_union_mm@k3=14 +POSTHOOK: Output: default@skew_dp_union_mm@k3=4 +POSTHOOK: Output: default@skew_dp_union_mm@k3=97 +POSTHOOK: Output: default@skew_dp_union_mm@k3=98 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] 
+POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_dp_union_mm order by k2 +PREHOOK: type: QUERY +PREHOOK: Input: default@skew_dp_union_mm +PREHOOK: Input: default@skew_dp_union_mm@k3=0 +PREHOOK: Input: default@skew_dp_union_mm@k3=10 +PREHOOK: Input: default@skew_dp_union_mm@k3=101 +PREHOOK: Input: default@skew_dp_union_mm@k3=102 +PREHOOK: Input: default@skew_dp_union_mm@k3=14 +PREHOOK: Input: default@skew_dp_union_mm@k3=4 +PREHOOK: Input: default@skew_dp_union_mm@k3=97 +PREHOOK: Input: 
default@skew_dp_union_mm@k3=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from skew_dp_union_mm order by k2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@skew_dp_union_mm +POSTHOOK: Input: default@skew_dp_union_mm@k3=0 +POSTHOOK: Input: default@skew_dp_union_mm@k3=10 +POSTHOOK: Input: default@skew_dp_union_mm@k3=101 +POSTHOOK: Input: default@skew_dp_union_mm@k3=102 +POSTHOOK: Input: default@skew_dp_union_mm@k3=14 +POSTHOOK: Input: default@skew_dp_union_mm@k3=4 +POSTHOOK: Input: default@skew_dp_union_mm@k3=97 +POSTHOOK: Input: default@skew_dp_union_mm@k3=98 +#### A masked pattern was here #### +0 0 0 0 +1 2 3 4 +10 10 10 10 +11 12 13 14 +97 97 97 97 +98 98 98 98 +98 99 100 101 +99 100 101 102 +PREHOOK: query: drop table skew_dp_union_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@skew_dp_union_mm +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: drop table skew_dp_union_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@skew_dp_union_mm +POSTHOOK: Output: default@skew_dp_union_mm +PREHOOK: query: -- future ---drop table partunion_mm; --drop table merge_mm; --drop table ctas_mm; ---drop table T1; ---drop table T2; ---drop table skew_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- ---create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); --- --- ---insert into table partunion_mm partition(key_mm) ---select temps.* from ( ---select key as key_mm, key from ctas_mm ---union all ---select key as key_mm, key from simple_mm ) temps; -- --set hive.merge.mapredfiles=true; --set hive.merge.sparkfiles=true; @@ -657,22 +765,6 @@ PREHOOK: query: -- TODO# from here, fix it -- FROM src; -- -- ---set hive.optimize.skewjoin.compiletime = true; ----- the test case is wrong? --- ---CREATE TABLE T1(key STRING, val STRING) ---SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; ---CREATE TABLE T2(key STRING, val STRING) ---SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; --- ---EXPLAIN ---SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; --- ---create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); ---INSERT OVERWRITE TABLE skew_mm ---SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ---- TODO load, multi-insert etc -- @@ -682,35 +774,18 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO# from here, fix it - - - - --- future +POSTHOOK: query: -- future ---drop table partunion_mm; --drop table merge_mm; --drop table ctas_mm; ---drop table T1; ---drop table T2; ---drop table skew_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- ---create table partunion_mm(id_mm int) partitioned by (key_mm int) tblproperties ('hivecommit'='true'); --- --- ---insert into table partunion_mm partition(key_mm) ---select temps.* from ( ---select key as key_mm, key from ctas_mm ---union all ---select key as key_mm, key from simple_mm ) temps; -- --set hive.merge.mapredfiles=true; --set hive.merge.sparkfiles=true; @@ -729,22 +804,6 @@ POSTHOOK: query: -- TODO# from here, fix it -- FROM src; -- -- ---set hive.optimize.skewjoin.compiletime = true; ----- the test case is wrong? 
--- ---CREATE TABLE T1(key STRING, val STRING) ---SKEWED BY (key) ON ((2)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1; ---CREATE TABLE T2(key STRING, val STRING) ---SKEWED BY (key) ON ((3)) STORED AS TEXTFILE; ---LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2; --- ---EXPLAIN ---SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key; --- ---create table skew_mm(k1 string, k2 string, k3 string, k4 string) SKEWED BY (key) ON ((2)) tblproperties ('hivecommit'='true'); ---INSERT OVERWRITE TABLE skew_mm ---SELECT a.key as k1, a.val as k2, b.key as k3, b.val as k4 FROM T1 a JOIN T2 b ON a.key = b.key; -- ---- TODO load, multi-insert etc -- diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 87214ba55d60..d6d31ea24bc6 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,140 +28,163 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table partunion_no_mm(id int) partitioned by (key int) +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='false') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partunion_no_mm -POSTHOOK: query: create table partunion_no_mm(id int) partitioned by (key int) +PREHOOK: Output: default@skew_mm +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='false') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partunion_no_mm -PREHOOK: query: insert into table partunion_no_mm partition(key) -select temps.* from ( -select key as p, key from intermediate -union all -select key + 1 as p, key + 1 from intermediate ) temps +POSTHOOK: Output: default@skew_mm +PREHOOK: query: insert into table skew_mm +select key, key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@partunion_no_mm -POSTHOOK: query: insert into table partunion_no_mm partition(key) -select temps.* from ( -select key as p, key from intermediate -union all -select key + 1 as p, key + 1 from intermediate ) temps +PREHOOK: Output: default@skew_mm +POSTHOOK: query: insert into table skew_mm +select key, key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@skew_mm +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: drop table skew_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@skew_mm +PREHOOK: Output: default@skew_mm +POSTHOOK: query: drop table skew_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: 
default@skew_mm +POSTHOOK: Output: default@skew_mm +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@skew_mm +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@skew_mm +PREHOOK: query: insert into table skew_mm +select key, key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@skew_mm +POSTHOOK: query: insert into table skew_mm +select key, key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@partunion_no_mm@key=0 -POSTHOOK: Output: default@partunion_no_mm@key=1 -POSTHOOK: Output: default@partunion_no_mm@key=455 -POSTHOOK: Output: default@partunion_no_mm@key=456 -POSTHOOK: Lineage: partunion_no_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_no_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_no_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_no_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from partunion_no_mm +POSTHOOK: Output: default@skew_mm +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_mm PREHOOK: type: QUERY -PREHOOK: Input: default@partunion_no_mm -PREHOOK: Input: default@partunion_no_mm@key=0 -PREHOOK: Input: default@partunion_no_mm@key=1 -PREHOOK: Input: default@partunion_no_mm@key=455 -PREHOOK: Input: default@partunion_no_mm@key=456 +PREHOOK: Input: default@skew_mm #### A masked pattern was here #### -POSTHOOK: query: select * from partunion_no_mm +POSTHOOK: query: select * from skew_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@partunion_no_mm -POSTHOOK: Input: default@partunion_no_mm@key=0 -POSTHOOK: Input: default@partunion_no_mm@key=1 -POSTHOOK: Input: default@partunion_no_mm@key=455 -POSTHOOK: Input: default@partunion_no_mm@key=456 +POSTHOOK: Input: default@skew_mm #### A masked pattern was here #### -0 0 -0 0 -1 1 -1 1 -455 455 -455 455 -456 456 -456 456 -PREHOOK: query: drop table partunion_no_mm +455 455 455 +455 455 455 +0 0 0 +0 0 0 +PREHOOK: query: drop table skew_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@partunion_no_mm -PREHOOK: Output: default@partunion_no_mm -POSTHOOK: query: drop table partunion_no_mm +PREHOOK: Input: default@skew_mm +PREHOOK: Output: default@skew_mm +POSTHOOK: query: drop table skew_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: 
default@partunion_no_mm -POSTHOOK: Output: default@partunion_no_mm -PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +POSTHOOK: Input: default@skew_mm +POSTHOOK: Output: default@skew_mm +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@partunion_mm -POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +PREHOOK: Output: default@skew_mm +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@partunion_mm -PREHOOK: query: insert into table partunion_mm partition(key) -select temps.* from ( -select key as p, key from intermediate +POSTHOOK: Output: default@skew_mm +PREHOOK: query: insert into table skew_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate union all -select key + 1 as p, key + 1 from intermediate ) temps +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@partunion_mm -POSTHOOK: query: insert into table partunion_mm partition(key) -select temps.* from ( -select key as p, key from intermediate +PREHOOK: Output: default@skew_mm +POSTHOOK: query: insert into table skew_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate union all -select key + 1 as p, key + 1 from intermediate ) temps +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@partunion_mm@key=0 -POSTHOOK: Output: default@partunion_mm@key=1 -POSTHOOK: Output: default@partunion_mm@key=455 -POSTHOOK: Output: default@partunion_mm@key=456 -POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_mm PARTITION(key=455).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: partunion_mm PARTITION(key=456).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from partunion_mm +POSTHOOK: Output: default@skew_mm@k3=0 +POSTHOOK: Output: default@skew_mm@k3=4 +POSTHOOK: Output: default@skew_mm@k3=455 +POSTHOOK: Output: default@skew_mm@k3=459 +POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: 
Lineage: skew_mm PARTITION(k3=455).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=455).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=455).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_mm PREHOOK: type: QUERY -PREHOOK: Input: default@partunion_mm -PREHOOK: Input: default@partunion_mm@key=0 -PREHOOK: Input: default@partunion_mm@key=1 -PREHOOK: Input: default@partunion_mm@key=455 -PREHOOK: Input: default@partunion_mm@key=456 +PREHOOK: Input: default@skew_mm +PREHOOK: Input: default@skew_mm@k3=0 +PREHOOK: Input: default@skew_mm@k3=4 +PREHOOK: Input: default@skew_mm@k3=455 +PREHOOK: Input: default@skew_mm@k3=459 #### A masked pattern was here #### -POSTHOOK: query: select * from partunion_mm +POSTHOOK: query: select * from skew_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@partunion_mm -POSTHOOK: Input: default@partunion_mm@key=0 -POSTHOOK: Input: default@partunion_mm@key=1 -POSTHOOK: Input: default@partunion_mm@key=455 -POSTHOOK: Input: default@partunion_mm@key=456 +POSTHOOK: Input: default@skew_mm +POSTHOOK: Input: default@skew_mm@k3=0 +POSTHOOK: Input: default@skew_mm@k3=4 +POSTHOOK: Input: default@skew_mm@k3=455 +POSTHOOK: Input: default@skew_mm@k3=459 #### A masked pattern was here #### -0 0 -0 0 -1 1 -1 1 -455 455 -455 455 -456 456 -456 456 -PREHOOK: query: drop table partunion_mm +0 0 0 0 +0 0 0 0 +1 2 3 4 +1 2 3 4 +455 455 455 455 +455 455 455 455 +456 457 458 459 +456 457 458 459 +PREHOOK: query: drop table skew_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@partunion_mm -PREHOOK: Output: default@partunion_mm -POSTHOOK: query: drop table partunion_mm +PREHOOK: Input: default@skew_mm +PREHOOK: Output: default@skew_mm +POSTHOOK: query: drop table skew_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@partunion_mm -POSTHOOK: Output: default@partunion_mm +POSTHOOK: Input: default@skew_mm +POSTHOOK: Output: default@skew_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From eacf9f9b6d7405b68def88ffc5fd755222375efc Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Thu, 13 Oct 2016 17:18:46 -0700 Subject: [PATCH 09/24] HIVE-14640 : handle hive.merge.*files in select queries (Sergey Shelukhin) --- .../org/apache/hadoop/hive/conf/HiveConf.java | 2 +- .../ql/exec/AbstractFileMergeOperator.java | 181 +++++++++----- .../hadoop/hive/ql/exec/FileSinkOperator.java | 187 ++------------ .../apache/hadoop/hive/ql/exec/MoveTask.java | 8 +- .../hive/ql/exec/OrcFileMergeOperator.java | 
11 +- .../hive/ql/exec/RCFileMergeOperator.java | 3 +- .../apache/hadoop/hive/ql/exec/Utilities.java | 225 ++++++++++++++++- .../rcfile/truncate/ColumnTruncateMapper.java | 1 + .../apache/hadoop/hive/ql/metadata/Hive.java | 1 + .../hive/ql/optimizer/GenMapRedUtils.java | 214 +++++++++------- .../hive/ql/parse/DDLSemanticAnalyzer.java | 9 +- .../hadoop/hive/ql/parse/GenTezUtils.java | 4 +- .../hive/ql/parse/SemanticAnalyzer.java | 4 +- .../plan/ConditionalResolverMergeFiles.java | 17 +- .../hadoop/hive/ql/plan/FileMergeDesc.java | 9 + .../hadoop/hive/ql/plan/FileSinkDesc.java | 14 +- .../apache/hadoop/hive/ql/plan/MoveWork.java | 10 +- ql/src/test/queries/clientpositive/mm_all.q | 57 +++-- .../test/queries/clientpositive/mm_current.q | 40 +-- .../results/clientpositive/llap/mm_all.q.out | 232 ++++++++++++++---- .../clientpositive/llap/mm_current.q.out | 165 +------------ 21 files changed, 758 insertions(+), 636 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index c89142c1b4ec..6201c045b0f3 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -3122,7 +3122,7 @@ public static enum ConfVars { HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT("hive.metastore.mm.heartbeat.timeout", "1800s", new TimeValidator(TimeUnit.SECONDS), - "MM write ID times out after this long if a heartbeat is not send. Currently disabled."), // TODO# heartbeating not implemented + "MM write ID times out after this long if a heartbeat is not sent. Currently disabled."), HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT("hive.metastore.mm.absolute.timeout", "7d", new TimeValidator(TimeUnit.SECONDS), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index 40c784bf7673..dedbb786f49e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -34,6 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + /** * Fast file merge operator for ORC and RCfile. This is an abstract class which * does not process any rows. Refer {@link org.apache.hadoop.hive.ql.exec.OrcFileMergeOperator} @@ -47,20 +49,21 @@ public abstract class AbstractFileMergeOperator<T extends FileMergeDesc> protected JobConf jc; protected FileSystem fs; - protected boolean autoDelete; - protected boolean exception; - protected Path outPath; - protected Path finalPath; - protected Path dpPath; - protected Path tmpPath; - protected Path taskTmpPath; - protected int listBucketingDepth; - protected boolean hasDynamicPartitions; - protected boolean isListBucketingAlterTableConcatenate; - protected boolean tmpPathFixedConcatenate; - protected boolean tmpPathFixed; - protected Set<Path> incompatFileSet; - protected transient DynamicPartitionCtx dpCtx; + private boolean autoDelete; + private Path outPath; // The output path used by the subclasses. + private Path finalPath; // Used as a final destination; same as outPath for MM tables. + private Path dpPath; + private Path tmpPath; // Only stored to update based on the original in fixTmpPath. + private Path taskTmpPath; // Only stored to update based on the original in fixTmpPath.
+ private int listBucketingDepth; + private boolean hasDynamicPartitions; + private boolean isListBucketingAlterTableConcatenate; + private boolean tmpPathFixedConcatenate; + private boolean tmpPathFixed; + private Set<Path> incompatFileSet; + private transient DynamicPartitionCtx dpCtx; + private boolean isMmTable; + private String taskId; /** Kryo ctor. */ protected AbstractFileMergeOperator() { @@ -77,39 +80,50 @@ public void initializeOp(Configuration hconf) throws HiveException { this.jc = new JobConf(hconf); incompatFileSet = new HashSet<Path>(); autoDelete = false; - exception = false; tmpPathFixed = false; tmpPathFixedConcatenate = false; - outPath = null; - finalPath = null; dpPath = null; - tmpPath = null; - taskTmpPath = null; dpCtx = conf.getDpCtx(); hasDynamicPartitions = conf.hasDynamicPartitions(); isListBucketingAlterTableConcatenate = conf .isListBucketingAlterTableConcatenate(); listBucketingDepth = conf.getListBucketingDepth(); Path specPath = conf.getOutputPath(); - updatePaths(Utilities.toTempPath(specPath), - Utilities.toTaskTempPath(specPath)); + isMmTable = conf.getMmWriteId() != null; + if (isMmTable) { + updatePaths(specPath, null); + } else { + updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath)); + } try { fs = specPath.getFileSystem(hconf); - autoDelete = fs.deleteOnExit(outPath); + if (!isMmTable) { + // Do not delete for MM tables. We either want the file if we succeed, or we must + // delete it explicitly before proceeding if the merge fails. + autoDelete = fs.deleteOnExit(outPath); + } } catch (IOException e) { - this.exception = true; - throw new HiveException("Failed to initialize AbstractFileMergeOperator", - e); + throw new HiveException("Failed to initialize AbstractFileMergeOperator", e); } } // sets up temp and task temp path private void updatePaths(Path tp, Path ttp) { - String taskId = Utilities.getTaskId(jc); + if (taskId == null) { + taskId = Utilities.getTaskId(jc); + } tmpPath = tp; - taskTmpPath = ttp; - finalPath = new Path(tp, taskId); - outPath = new Path(ttp, Utilities.toTempPath(taskId)); + if (isMmTable) { + taskTmpPath = null; + // Make sure we don't collide with the source. + outPath = finalPath = new Path(tmpPath, taskId + ".merged"); + } else { + taskTmpPath = ttp; + finalPath = new Path(tp, taskId); + outPath = new Path(ttp, Utilities.toTempPath(taskId)); + } + Utilities.LOG14535.info("Paths for merge " + taskId + ": tmp " + tmpPath + ", task " + + taskTmpPath + ", final " + finalPath + ", out " + outPath, new Exception()); } /** @@ -142,7 +156,7 @@ private void updatePaths(Path tp, Path ttp) { protected void fixTmpPath(Path inputPath, int depthDiff) throws IOException { // don't need to update tmp paths when there is no depth difference in paths - if (depthDiff <=0) { + if (depthDiff <= 0) { return; } @@ -157,10 +171,12 @@ protected void fixTmpPath(Path inputPath, int depthDiff) throws IOException { } Path newTmpPath = new Path(tmpPath, newPath); - Path newTaskTmpPath = new Path(taskTmpPath, newPath); if (!fs.exists(newTmpPath)) { + Utilities.LOG14535.info("Creating " + newTmpPath); fs.mkdirs(newTmpPath); } + + Path newTaskTmpPath = (taskTmpPath != null) ? new Path(taskTmpPath, newPath) : null; updatePaths(newTmpPath, newTaskTmpPath); } @@ -182,7 +198,7 @@ protected void checkPartitionsMatch(Path inputPath) throws IOException { } protected void fixTmpPath(Path path) throws IOException { - + Utilities.LOG14535.info("Calling fixTmpPath with " + path); // Fix temp path for alter table ...
concatenate if (isListBucketingAlterTableConcatenate) { if (this.tmpPathFixedConcatenate) { @@ -208,38 +224,49 @@ protected void fixTmpPath(Path path) throws IOException { @Override public void closeOp(boolean abort) throws HiveException { try { - if (!abort) { - // if outPath does not exist, then it means all paths within combine split are skipped as - // they are incompatible for merge (for example: files without stripe stats). - // Those files will be added to incompatFileSet - if (fs.exists(outPath)) { - FileStatus fss = fs.getFileStatus(outPath); + if (abort) { + if (!autoDelete || isMmTable) { + fs.delete(outPath, true); + } + return; + } + // if outPath does not exist, then it means all paths within combine split are skipped as + // they are incompatible for merge (for example: files without stripe stats). + // Those files will be added to incompatFileSet + if (fs.exists(outPath)) { + FileStatus fss = fs.getFileStatus(outPath); + if (!isMmTable) { if (!fs.rename(outPath, finalPath)) { - throw new IOException( - "Unable to rename " + outPath + " to " + finalPath); + throw new IOException("Unable to rename " + outPath + " to " + finalPath); } - LOG.info("renamed path " + outPath + " to " + finalPath + " . File" + - " size is " - + fss.getLen()); + LOG.info("Renamed path " + outPath + " to " + finalPath + + "(" + fss.getLen() + " bytes)."); + } else { + assert finalPath.equals(outPath); + // There's always just one file that we have merged. + // The union/DP/etc. should already be accounted for in the path. + Utilities.writeMmCommitManifest(Lists.newArrayList(outPath), + tmpPath.getParent(), fs, taskId, conf.getMmWriteId(), null); + LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes)."); } + } - // move any incompatible files to final path - if (incompatFileSet != null && !incompatFileSet.isEmpty()) { - for (Path incompatFile : incompatFileSet) { - Path destDir = finalPath.getParent(); - try { - Utilities.renameOrMoveFiles(fs, incompatFile, destDir); - LOG.info("Moved incompatible file " + incompatFile + " to " + - destDir); - } catch (HiveException e) { - LOG.error("Unable to move " + incompatFile + " to " + destDir); - throw new IOException(e); - } - } + // move any incompatible files to final path + if (incompatFileSet != null && !incompatFileSet.isEmpty()) { + if (isMmTable) { + // We only support query-time merge for MM tables, so don't handle this.
+ throw new HiveException("Incompatible files should not happen in MM tables."); } - } else { - if (!autoDelete) { - fs.delete(outPath, true); + for (Path incompatFile : incompatFileSet) { + Path destDir = finalPath.getParent(); + try { + Utilities.renameOrMoveFiles(fs, incompatFile, destDir); + LOG.info("Moved incompatible file " + incompatFile + " to " + + destDir); + } catch (HiveException e) { + LOG.error("Unable to move " + incompatFile + " to " + destDir); + throw new IOException(e); + } } } } catch (IOException e) { @@ -253,16 +280,26 @@ public void jobCloseOp(Configuration hconf, boolean success) try { Path outputDir = conf.getOutputPath(); FileSystem fs = outputDir.getFileSystem(hconf); - Path backupPath = backupOutputPath(fs, outputDir); - // TODO# merge-related move - Utilities.mvFileToFinalPath(outputDir, hconf, success, LOG, conf.getDpCtx(), - null, reporter); - if (success) { - LOG.info("jobCloseOp moved merged files to output dir: " + outputDir); - } - if (backupPath != null) { - fs.delete(backupPath, true); + Long mmWriteId = conf.getMmWriteId(); + if (mmWriteId == null) { + Path backupPath = backupOutputPath(fs, outputDir); + Utilities.mvFileToFinalPath( + outputDir, hconf, success, LOG, conf.getDpCtx(), null, reporter); + if (success) { + LOG.info("jobCloseOp moved merged files to output dir: " + outputDir); + } + if (backupPath != null) { + fs.delete(backupPath, true); + } + } else { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + lbLevels = conf.getListBucketingDepth(); + // We don't expect missing buckets from mere (actually there should be no buckets), + // so just pass null as bucketing context. Union suffix should also be accounted for. + Utilities.handleMmTableFinalPath(outputDir.getParent(), null, hconf, success, + dpLevels, lbLevels, null, mmWriteId, reporter); } + } catch (IOException e) { throw new HiveException("Failed jobCloseOp for AbstractFileMergeOperator", e); @@ -290,4 +327,12 @@ public String getName() { public static String getOperatorName() { return "MERGE"; } + + protected final Path getOutPath() { + return outPath; + } + + protected final void addIncompatibleFile(Path path) { + incompatFileSet.add(path); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index 5902036705e0..dda4b5155eb6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Utilities.MissingBucketsContext; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveKey; @@ -98,7 +99,6 @@ public class FileSinkOperator extends TerminalOperator implements Serializable { - private static final String MANIFEST_EXTENSION = ".manifest"; public static final Logger LOG = LoggerFactory.getLogger(FileSinkOperator.class); private static final boolean isInfoEnabled = LOG.isInfoEnabled(); private static final boolean isDebugEnabled = LOG.isDebugEnabled(); @@ -1128,26 +1128,9 @@ public void closeOp(boolean abort) throws HiveException { fsp.commit(fs, commitPaths); } } - if (!commitPaths.isEmpty()) { - Path manifestPath = getManifestDir(specPath, childSpecPathDynLinkedPartitions); - manifestPath = new 
Path(manifestPath, "_tmp." + ValidWriteIds.getMmFilePrefix( - conf.getMmWriteId()) + "_" + taskId + MANIFEST_EXTENSION); - Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); - try { - // Don't overwrite the manifest... should fail if we have collisions. - // We assume one FSOP per task (per specPath), so we create it in specPath. - try (FSDataOutputStream out = fs.create(manifestPath, false)) { - if (out == null) { - throw new HiveException("Failed to create manifest at " + manifestPath); - } - out.writeInt(commitPaths.size()); - for (Path path : commitPaths) { - out.writeUTF(path.toString()); - } - } - } catch (IOException e) { - throw new HiveException(e); - } + if (conf.getMmWriteId() != null) { + Utilities.writeMmCommitManifest(commitPaths, specPath, fs, taskId, conf.getMmWriteId(), + childSpecPathDynLinkedPartitions); } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { @@ -1165,9 +1148,6 @@ public void closeOp(boolean abort) throws HiveException { super.closeOp(abort); } - private static Path getManifestDir(Path specPath, String unionSuffix) { - return (unionSuffix == null) ? specPath : new Path(specPath, unionSuffix); - } /** * @return the name of the operator @@ -1196,9 +1176,17 @@ public void jobCloseOp(Configuration hconf, boolean success) unionSuffix = conf.getDirName().getName(); } if (!conf.isMmTable()) { - Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); // TODO# other callers + Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); } else { - handleMmTable(specPath, unionSuffix, hconf, success, dpCtx, lbCtx, conf, reporter); + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + lbLevels = lbCtx.calculateListBucketingLevel(); + // TODO: why is it stored in both? + int numBuckets = (conf.getTable() != null) ? conf.getTable().getNumBuckets() + : (dpCtx != null ? dpCtx.getNumBuckets() : 0); + MissingBucketsContext mbc = new MissingBucketsContext( + conf.getTableInfo(), numBuckets, conf.getCompressed()); + Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success, + dpLevels, lbLevels, mbc, conf.getMmWriteId(), reporter); } } } catch (IOException e) { @@ -1207,152 +1195,6 @@ public void jobCloseOp(Configuration hconf, boolean success) super.jobCloseOp(hconf, success); } - private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, - DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, String unionSuffix, PathFilter filter) - throws IOException { - StringBuilder sb = new StringBuilder(path.toUri().getPath()); - int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), - lbLevels = lbCtx == null ? 
0 : lbCtx.getSkewedColNames().size(); - for (int i = 0; i < dpLevels + lbLevels; i++) { - sb.append(Path.SEPARATOR).append("*"); - } - if (unionSuffix != null) { - sb.append(Path.SEPARATOR).append(unionSuffix); - } - sb.append(Path.SEPARATOR).append("*"); // TODO: we could add exact mm prefix here - Utilities.LOG14535.info("Looking for files via: " + sb.toString()); - Path pathPattern = new Path(path, sb.toString()); - return fs.globStatus(pathPattern, filter); - } - - private void handleMmTable(Path specPath, String unionSuffix, Configuration hconf, - boolean success, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, FileSinkDesc conf, - Reporter reporter) throws IOException, HiveException { - FileSystem fs = specPath.getFileSystem(hconf); - // Manifests would be at the root level, but the results at target level. - // TODO# special case - doesn't take bucketing into account - Path manifestDir = getManifestDir(specPath, unionSuffix); - - ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(conf.getMmWriteId(), true); - if (!success) { - tryDeleteAllMmFiles(fs, specPath, manifestDir, dpCtx, lbCtx, unionSuffix, filter); - return; - } - FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); - Utilities.LOG14535.info("Looking for manifests in: " + manifestDir); - List manifests = new ArrayList<>(); - if (files != null) { - for (FileStatus status : files) { - Path path = status.getPath(); - if (path.getName().endsWith(MANIFEST_EXTENSION)) { - Utilities.LOG14535.info("Reading manifest " + path); - manifests.add(path); - } - } - } - - Utilities.LOG14535.info("Looking for files in: " + specPath); - files = getMmDirectoryCandidates(fs, specPath, dpCtx, lbCtx, unionSuffix, filter); - ArrayList results = new ArrayList<>(); - if (files != null) { - for (FileStatus status : files) { - Path path = status.getPath(); - Utilities.LOG14535.info("Looking at path: " + path + " from " + System.identityHashCode(this)); - if (!status.isDirectory()) { - if (!path.getName().endsWith(MANIFEST_EXTENSION)) { - Utilities.LOG14535.warn("Unknown file found, deleting: " + path); - tryDelete(fs, path); - } - } else { - results.add(status); - } - } - } - - HashSet committed = new HashSet<>(); - for (Path mfp : manifests) { - try (FSDataInputStream mdis = fs.open(mfp)) { - int fileCount = mdis.readInt(); - for (int i = 0; i < fileCount; ++i) { - String nextFile = mdis.readUTF(); - if (!committed.add(nextFile)) { - throw new HiveException(nextFile + " was specified in multiple manifests"); - } - } - } - } - - for (FileStatus status : results) { - for (FileStatus child : fs.listStatus(status.getPath())) { - Path childPath = child.getPath(); - if (committed.remove(childPath.toString())) continue; // A good file. - Utilities.LOG14535.info("Deleting " + childPath + " that was not committed"); - // We should actually succeed here - if we fail, don't commit the query. - if (!fs.delete(childPath, true)) { - throw new HiveException("Failed to delete an uncommitted path " + childPath); - } - } - } - - if (!committed.isEmpty()) { - throw new HiveException("The following files were committed but not found: " + committed); - } - for (Path mfp : manifests) { - Utilities.LOG14535.info("Deleting manifest " + mfp); - tryDelete(fs, mfp); - } - // Delete the manifest directory if we only created it for manifests; otherwise the - // dynamic partition loader will find it and try to load it as a partition... what a mess. 
- if (manifestDir != specPath) { - FileStatus[] remainingFiles = fs.listStatus(manifestDir); - if (remainingFiles == null || remainingFiles.length == 0) { - Utilities.LOG14535.info("Deleting directory " + manifestDir); - tryDelete(fs, manifestDir); - } - } - - if (results.isEmpty()) return; - - // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing, - // so maintain parity here by not calling it at all. - if (lbCtx != null) return; - FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]); - List emptyBuckets = Utilities.removeTempOrDuplicateFiles( - fs, finalResults, dpCtx, conf, hconf); - // create empty buckets if necessary - if (emptyBuckets.size() > 0) { - Utilities.createEmptyBuckets(hconf, emptyBuckets, conf, reporter); - } - } - - private void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, - DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, String unionSuffix, - ValidWriteIds.IdPathFilter filter) throws IOException { - FileStatus[] files = getMmDirectoryCandidates(fs, specPath, dpCtx, lbCtx, unionSuffix, filter); - if (files != null) { - for (FileStatus status : files) { - Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); - tryDelete(fs, status.getPath()); - } - } - files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); - if (files != null) { - for (FileStatus status : files) { - Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); - tryDelete(fs, status.getPath()); - } - } - } - - - private void tryDelete(FileSystem fs, Path path) { - try { - fs.delete(path, true); - } catch (IOException ex) { - LOG.error("Failed to delete " + path, ex); - } - } - @Override public OperatorType getType() { return OperatorType.FILESINK; @@ -1427,7 +1269,6 @@ private void publishStats() throws HiveException { for (Map.Entry entry : valToPaths.entrySet()) { String fspKey = entry.getKey(); // DP/LB FSPaths fspValue = entry.getValue(); - // TODO# useful code as reference, as it takes apart the crazy paths // for bucketed tables, hive.optimize.sort.dynamic.partition optimization // adds the taskId to the fspKey. if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 9bc4836c1da4..f2b8ca382c2b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -256,6 +256,9 @@ public TaskInformation(Task task, String path) { @Override public int execute(DriverContext driverContext) { + Utilities.LOG14535.info("Executing MoveWork " + System.identityHashCode(work) + + " with " + work.getLoadFileWork() + "; " + work.getLoadTableWork() + "; " + + work.getLoadMultiFilesWork(), new Exception()); try { if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { @@ -315,15 +318,14 @@ public int execute(DriverContext driverContext) { boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID; if (tbd.isMmTable() && isAcid) { - // TODO# need to make sure ACID writes to final directories. Otherwise, might need to move. 
- throw new HiveException("ACID and MM are not supported"); + throw new HiveException("ACID and MM are not supported"); } // Create a data container DataContainer dc = null; if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); - Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable()); + Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName(), new Exception()); db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(), tbd.getMmWriteId()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java index e3cb765e0a82..835791b86397 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java @@ -75,6 +75,7 @@ public void process(Object row, int tag) throws HiveException { private void processKeyValuePairs(Object key, Object value) throws HiveException { String filePath = ""; + boolean exception = false; try { OrcFileValueWrapper v; OrcFileKeyWrapper k; @@ -87,12 +88,15 @@ private void processKeyValuePairs(Object key, Object value) // skip incompatible file, files that are missing stripe statistics are set to incompatible if (k.isIncompatFile()) { LOG.warn("Incompatible ORC file merge! Stripe statistics is missing. " + k.getInputPath()); - incompatFileSet.add(k.getInputPath()); + addIncompatibleFile(k.getInputPath()); return; } filePath = k.getInputPath().toUri().getPath(); + Utilities.LOG14535.info("OrcFileMergeOperator processing " + filePath, new Exception()); + + fixTmpPath(k.getInputPath().getParent()); v = (OrcFileValueWrapper) value; @@ -126,6 +130,7 @@ private void processKeyValuePairs(Object key, Object value) options.bufferSize(compressBuffSize).enforceBufferSize(); } + Path outPath = getOutPath(); outWriter = OrcFile.createWriter(outPath, options); if (isLogDebugEnabled) { LOG.info("ORC merge file output path: " + outPath); @@ -133,7 +138,7 @@ private void processKeyValuePairs(Object key, Object value) } if (!checkCompatibility(k)) { - incompatFileSet.add(k.getInputPath()); + addIncompatibleFile(k.getInputPath()); return; } @@ -164,7 +169,7 @@ private void processKeyValuePairs(Object key, Object value) outWriter.appendUserMetadata(v.getUserMetadata()); } } catch (Throwable e) { - this.exception = true; + exception = true; LOG.error("Closing operator..Exception: " + ExceptionUtils.getStackTrace(e)); throw new HiveException(e); } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java index 4dea1d20cf57..349b459f418c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java @@ -77,7 +77,7 @@ private void processKeyValuePairs(Object k, Object v) codec = key.getCodec(); columnNumber = key.getKeyBuffer().getColumnNumber(); RCFileOutputFormat.setColumnNumber(jc, columnNumber); - outWriter = new RCFile.Writer(fs, jc, outPath, null, codec); + outWriter = new RCFile.Writer(fs, jc, getOutPath(), null, codec); } boolean sameCodec = ((codec == key.getCodec()) || codec.getClass().equals( @@ -94,7 +94,6 @@ private void processKeyValuePairs(Object k, Object v) 
key.getRecordLength(), key.getKeyLength(), key.getCompressedKeyLength()); } catch (Throwable e) { - this.exception = true; closeOp(true); throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index d343e3241c93..49bdd84ba7a2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -83,6 +83,8 @@ import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -94,6 +96,7 @@ import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -146,6 +149,7 @@ import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.MergeJoinWork; @@ -1411,7 +1415,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, boolean success, Logger log, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter) throws IOException, HiveException { - + FileSystem fs = specPath.getFileSystem(hconf); Path tmpPath = Utilities.toTempPath(specPath); Path taskTmpPath = Utilities.toTaskTempPath(specPath); @@ -1422,12 +1426,14 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // remove any tmp file or double-committed output files - List emptyBuckets = Utilities.removeTempOrDuplicateFiles(fs, statuses, dpCtx, conf, hconf); + List emptyBuckets = Utilities.removeTempOrDuplicateFiles( + fs, statuses, dpCtx, conf, hconf); perfLogger.PerfLogEnd("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // create empty buckets if necessary if (emptyBuckets.size() > 0) { perfLogger.PerfLogBegin("FileSinkOperator", "CreateEmptyBuckets"); - createEmptyBuckets(hconf, emptyBuckets, conf, reporter); + createEmptyBuckets( + hconf, emptyBuckets, conf.getCompressed(), conf.getTableInfo(), reporter); perfLogger.PerfLogEnd("FileSinkOperator", "CreateEmptyBuckets"); } // move to the file destination @@ -1457,7 +1463,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, * @throws IOException */ static void createEmptyBuckets(Configuration hconf, List paths, - FileSinkDesc conf, Reporter reporter) + boolean isCompressed, TableDesc tableInfo, Reporter reporter) throws HiveException, IOException { JobConf jc; @@ -1469,13 +1475,11 @@ static void createEmptyBuckets(Configuration hconf, List paths, } HiveOutputFormat hiveOutputFormat = null; Class outputClass = null; - boolean isCompressed = conf.getCompressed(); - TableDesc tableInfo = conf.getTableInfo(); try { Serializer serializer = 
(Serializer) tableInfo.getDeserializerClass().newInstance(); serializer.initialize(null, tableInfo.getProperties()); outputClass = serializer.getSerializedClass(); - hiveOutputFormat = HiveFileFormatUtils.getHiveOutputFormat(hconf, conf.getTableInfo()); + hiveOutputFormat = HiveFileFormatUtils.getHiveOutputFormat(hconf, tableInfo); } catch (SerDeException e) { throw new HiveException(e); } catch (InstantiationException e) { @@ -1518,13 +1522,21 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, Path path, */ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Configuration hconf) throws IOException { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + numBuckets = (conf != null && conf.getTable() != null) + ? conf.getTable().getNumBuckets() : 0; + return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf); + } + + public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, + int dpLevels, int numBuckets, Configuration hconf) throws IOException { if (fileStats == null) { return null; } List result = new ArrayList(); HashMap taskIDToFile = null; - if (dpCtx != null) { + if (dpLevels > 0) { FileStatus parts[] = fileStats; for (int i = 0; i < parts.length; ++i) { @@ -1543,14 +1555,14 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] taskIDToFile = removeTempOrDuplicateFiles(items, fs); // if the table is bucketed and enforce bucketing, we should check and generate all buckets - if (dpCtx.getNumBuckets() > 0 && taskIDToFile != null && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + if (numBuckets > 0 && taskIDToFile != null && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { // refresh the file list items = fs.listStatus(parts[i].getPath()); // get the missing buckets and generate empty buckets String taskID1 = taskIDToFile.keySet().iterator().next(); Path bucketPath = taskIDToFile.values().iterator().next().getPath(); Utilities.LOG14535.info("Bucket path " + bucketPath); - for (int j = 0; j < dpCtx.getNumBuckets(); ++j) { + for (int j = 0; j < numBuckets; ++j) { addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); } } @@ -1561,13 +1573,13 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] return result; } taskIDToFile = removeTempOrDuplicateFiles(items, fs); - if(taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null - && (conf.getTable().getNumBuckets() > taskIDToFile.size()) && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + if(taskIDToFile != null && taskIDToFile.size() > 0 && (numBuckets > taskIDToFile.size()) + && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { // get the missing buckets and generate empty buckets for non-dynamic partition String taskID1 = taskIDToFile.keySet().iterator().next(); Path bucketPath = taskIDToFile.values().iterator().next().getPath(); Utilities.LOG14535.info("Bucket path " + bucketPath); - for (int j = 0; j < conf.getTable().getNumBuckets(); ++j) { + for (int j = 0; j < numBuckets; ++j) { addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); } } @@ -3746,4 +3758,191 @@ public static String humanReadableByteCount(long bytes) { String suffix = "KMGTPE".charAt(exp-1) + ""; return String.format("%.2f%sB", bytes / Math.pow(unit, exp), suffix); } + + private static final String 
MANIFEST_EXTENSION = ".manifest"; + + private static Path getManifestDir(Path specPath, String unionSuffix) { + return (unionSuffix == null) ? specPath : new Path(specPath, unionSuffix); + } + + private static void tryDelete(FileSystem fs, Path path) { + try { + fs.delete(path, true); + } catch (IOException ex) { + LOG.error("Failed to delete " + path, ex); + } + } + + private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, + int dpLevels, int lbLevels, String unionSuffix, PathFilter filter) throws IOException { + StringBuilder sb = new StringBuilder(path.toUri().getPath()); + for (int i = 0; i < dpLevels + lbLevels; i++) { + sb.append(Path.SEPARATOR).append("*"); + } + if (unionSuffix != null) { + sb.append(Path.SEPARATOR).append(unionSuffix); + } + sb.append(Path.SEPARATOR).append("*"); // TODO: we could add exact mm prefix here + Utilities.LOG14535.info("Looking for files via: " + sb.toString()); + Path pathPattern = new Path(path, sb.toString()); + return fs.globStatus(pathPattern, filter); + } + + private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, + int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter) + throws IOException { + FileStatus[] files = getMmDirectoryCandidates( + fs, specPath, dpLevels, lbLevels, unionSuffix, filter); + if (files != null) { + for (FileStatus status : files) { + Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); + tryDelete(fs, status.getPath()); + } + } + files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); + if (files != null) { + for (FileStatus status : files) { + Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); + tryDelete(fs, status.getPath()); + } + } + } + + + public static void writeMmCommitManifest(List commitPaths, Path specPath, FileSystem fs, + String taskId, Long mmWriteId, String unionSuffix) throws HiveException { + if (commitPaths.isEmpty()) return; + Path manifestPath = getManifestDir(specPath, unionSuffix); + manifestPath = new Path(manifestPath, "_tmp." + ValidWriteIds.getMmFilePrefix( + mmWriteId) + "_" + taskId + MANIFEST_EXTENSION); + Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); + try { + // Don't overwrite the manifest... should fail if we have collisions. + // We assume one FSOP per task (per specPath), so we create it in specPath. + try (FSDataOutputStream out = fs.create(manifestPath, false)) { + if (out == null) { + throw new HiveException("Failed to create manifest at " + manifestPath); + } + out.writeInt(commitPaths.size()); + for (Path path : commitPaths) { + out.writeUTF(path.toString()); + } + } + } catch (IOException e) { + throw new HiveException(e); + } + } + + public static final class MissingBucketsContext { + public final TableDesc tableInfo; + public final int numBuckets; + public final boolean isCompressed; + public MissingBucketsContext(TableDesc tableInfo, int numBuckets, boolean isCompressed) { + this.tableInfo = tableInfo; + this.numBuckets = numBuckets; + this.isCompressed = isCompressed; + } + } + + public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Configuration hconf, + boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long mmWriteId, + Reporter reporter) throws IOException, HiveException { + FileSystem fs = specPath.getFileSystem(hconf); + // Manifests would be at the root level, but the results at target level. 
+ // TODO# special case - doesn't take bucketing into account + Path manifestDir = getManifestDir(specPath, unionSuffix); + + ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + if (!success) { + tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, unionSuffix, filter); + return; + } + FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); + Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")"); + List manifests = new ArrayList<>(); + if (files != null) { + for (FileStatus status : files) { + Path path = status.getPath(); + if (path.getName().endsWith(MANIFEST_EXTENSION)) { + Utilities.LOG14535.info("Reading manifest " + path); + manifests.add(path); + } + } + } + + Utilities.LOG14535.info("Looking for files in: " + specPath); + files = getMmDirectoryCandidates(fs, specPath, dpLevels, lbLevels, unionSuffix, filter); + ArrayList results = new ArrayList<>(); + if (files != null) { + for (FileStatus status : files) { + Path path = status.getPath(); + Utilities.LOG14535.info("Looking at path: " + path); + if (!status.isDirectory()) { + if (!path.getName().endsWith(MANIFEST_EXTENSION)) { + Utilities.LOG14535.warn("Unknown file found, deleting: " + path); + tryDelete(fs, path); + } + } else { + results.add(status); + } + } + } + + HashSet committed = new HashSet<>(); + for (Path mfp : manifests) { + try (FSDataInputStream mdis = fs.open(mfp)) { + int fileCount = mdis.readInt(); + for (int i = 0; i < fileCount; ++i) { + String nextFile = mdis.readUTF(); + if (!committed.add(nextFile)) { + throw new HiveException(nextFile + " was specified in multiple manifests"); + } + } + } + } + + for (FileStatus status : results) { + for (FileStatus child : fs.listStatus(status.getPath())) { + Path childPath = child.getPath(); + if (committed.remove(childPath.toString())) continue; // A good file. + Utilities.LOG14535.info("Deleting " + childPath + " that was not committed"); + // We should actually succeed here - if we fail, don't commit the query. + if (!fs.delete(childPath, true)) { + throw new HiveException("Failed to delete an uncommitted path " + childPath); + } + } + } + + if (!committed.isEmpty()) { + throw new HiveException("The following files were committed but not found: " + committed); + } + for (Path mfp : manifests) { + Utilities.LOG14535.info("Deleting manifest " + mfp); + tryDelete(fs, mfp); + } + // Delete the manifest directory if we only created it for manifests; otherwise the + // dynamic partition loader will find it and try to load it as a partition... what a mess. + if (manifestDir != specPath) { + FileStatus[] remainingFiles = fs.listStatus(manifestDir); + if (remainingFiles == null || remainingFiles.length == 0) { + Utilities.LOG14535.info("Deleting directory " + manifestDir); + tryDelete(fs, manifestDir); + } + } + + if (results.isEmpty()) return; + + // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing, + // so maintain parity here by not calling it at all. + if (lbLevels != 0) return; + FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]); + List emptyBuckets = Utilities.removeTempOrDuplicateFiles( + fs, finalResults, dpLevels, mbc == null ? 
0 : mbc.numBuckets, hconf); + // create empty buckets if necessary + if (emptyBuckets.size() > 0) { + assert mbc != null; + Utilities.createEmptyBuckets(hconf, emptyBuckets, mbc.isCompressed, mbc.tableInfo, reporter); + } + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java index bd537cda4d05..d013c6f78969 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java @@ -234,6 +234,7 @@ public static void jobClose(Path outputPath, boolean success, JobConf job, ) throws HiveException, IOException { FileSystem fs = outputPath.getFileSystem(job); Path backupPath = backupOutputPath(fs, outputPath, job); + // TODO# special case - what is this about? Utilities.mvFileToFinalPath(outputPath, job, success, LOG, dynPartCtx, null, reporter); fs.delete(backupPath, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index e66948f79770..9a1c1fa20c08 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1842,6 +1842,7 @@ private Set getValidPartitionsInPath(int numDP, Path loadPath) throws Hive if (!s.isDirectory()) { throw new HiveException("partition " + s.getPath() + " is not a directory!"); } + Utilities.LOG14535.info("Found DP " + s.getPath()); validPartitions.add(s.getPath()); } } catch (IOException e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 675bfd028aaa..79ef4d096a89 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -93,6 +93,7 @@ import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc; +import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredLocalWork; @@ -1256,23 +1257,28 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, List> mvTasks, HiveConf conf, Task currTask) throws SemanticException { + // // 1. create the operator tree // FileSinkDesc fsInputDesc = fsInput.getConf(); + Utilities.LOG14535.info("Creating merge work from " + System.identityHashCode(fsInput) + + " with write ID " + (fsInputDesc.isMmTable() ? fsInputDesc.getMmWriteId() : null) + " into " + finalName); // Create a TableScan operator RowSchema inputRS = fsInput.getSchema(); TableScanOperator tsMerge = GenMapRedUtils.createTemporaryTableScanOperator( fsInput.getCompilationOpContext(), inputRS); + Long srcMmWriteId = fsInputDesc.isMmTable() ? 
fsInputDesc.getMmWriteId() : null; + // Create a FileSink operator TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); - // TODO# special case #N - merge FS is created here - FileSinkDesc fsOutputDesc = new FileSinkDesc(finalName, ts, - conf.getBoolVar(ConfVars.COMPRESSRESULT)); - FileSinkOperator fsOutput = (FileSinkOperator) OperatorFactory.getAndMakeChild( - fsOutputDesc, inputRS, tsMerge); + FileSinkDesc fsOutputDesc = new FileSinkDesc( + finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); + fsOutputDesc.setMmWriteId(srcMmWriteId); + // Create and attach the filesink for the merge. We don't actually need it for anything here. + OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge); // If the input FileSinkOperator is a dynamic partition enabled, the tsMerge input schema // needs to include the partition column, and the fsOutput should have @@ -1305,9 +1311,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // // 2. Constructing a conditional task consisting of a move task and a map reduce task // - // TODO# movetask is created here; handle MM tables - MoveWork dummyMv = new MoveWork(null, null, null, - new LoadFileDesc(fsInputDesc.getFinalDirName(), finalName, true, null, null), false); + Path inputDirName = fsInputDesc.getMergeInputDirName(); MapWork cplan; Serializable work; @@ -1348,8 +1352,15 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"); // NOTE: we should gather stats in MR1 rather than MR2 at merge job since we don't // know if merge MR2 will be triggered at execution time + MoveWork dummyMv = null; + if (srcMmWriteId == null) { + // Only create the movework for non-MM table. No action needed for a MM table. + Utilities.LOG14535.info("creating dummy movetask for merge (with lfd)"); + dummyMv = new MoveWork(null, null, null, + new LoadFileDesc(inputDirName, finalName, true, null, null), false); + } ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, - fsInputDesc.getFinalDirName().toString()); + fsInputDesc.getMergeInputDirName().toString()); // keep the dynamic partition context in conditional task resolver context ConditionalResolverMergeFilesCtx mrCtx = @@ -1360,7 +1371,13 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // // 3. add the moveTask as the children of the conditional task // - linkMoveTask(fsOutput, cndTsk, mvTasks, conf, dependencyTask); + // Use the original fsOp path here in case of MM - while the new FSOP merges files inside the + // MM directory, the original MoveTask still commits based on the parent. Note that this path + // can only be triggered for a merge that's part of insert for now; MM tables do not support + // concatenate. Keeping the old logic for non-MM tables with temp directories and stuff. + Path fsopPath = srcMmWriteId != null + ? 
fsInputDesc.getFinalDirName() : fsOutputDesc.getFinalDirName(); + linkMoveTask(fsopPath, cndTsk, mvTasks, conf, dependencyTask); } /** @@ -1373,11 +1390,11 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, * @param hconf * @param dependencyTask */ - public static void linkMoveTask(FileSinkOperator newOutput, + private static void linkMoveTask(Path fsopPath, ConditionalTask cndTsk, List> mvTasks, HiveConf hconf, DependencyCollectionTask dependencyTask) { - Task mvTask = GenMapRedUtils.findMoveTask(mvTasks, newOutput); + Task mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsopPath); for (Task tsk : cndTsk.getListTasks()) { linkMoveTask(mvTask, tsk, hconf, dependencyTask); @@ -1392,7 +1409,7 @@ public static void linkMoveTask(FileSinkOperator newOutput, * @param hconf * @param dependencyTask */ - public static void linkMoveTask(Task mvTask, + private static void linkMoveTask(Task mvTask, Task task, HiveConf hconf, DependencyCollectionTask dependencyTask) { @@ -1527,10 +1544,11 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, TableScanOperator topOp, FileSinkDesc fsDesc) { ArrayList aliases = new ArrayList(); - Path inputDir = fsDesc.getFinalDirName(); + Path inputDir = fsDesc.getMergeInputDirName(); TableDesc tblDesc = fsDesc.getTableInfo(); aliases.add(inputDir.toString()); // dummy alias: just use the input path + Utilities.LOG14535.info("createMRWorkForMergingFiles for " + inputDir); // constructing the default MapredWork MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf); MapWork cplan = cMrPlan.getMapWork(); @@ -1555,8 +1573,9 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, */ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, boolean hasDynamicPartitions, CompilationOpContext ctx) throws SemanticException { + + Path inputDir = fsInputDesc.getMergeInputDirName(); - Path inputDir = fsInputDesc.getFinalDirName(); TableDesc tblDesc = fsInputDesc.getTableInfo(); List inputDirs = new ArrayList(1); @@ -1580,6 +1599,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, + " format other than RCFile or ORCFile"); } + Utilities.LOG14535.info("creating mergefilework from " + inputDirs + " to " + finalName); // create the merge file work MergeFileWork work = new MergeFileWork(inputDirs, finalName, hasDynamicPartitions, tblDesc.getInputFileFormatClass().getName()); @@ -1602,6 +1622,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, } else { fmd = new OrcFileMergeDesc(); } + fmd.setMmWriteId(fsInputDesc.getMmWriteId()); fmd.setDpCtx(fsInputDesc.getDynPartCtx()); fmd.setOutputPath(finalName); fmd.setHasDynamicPartitions(work.hasDynamicPartitions()); @@ -1635,6 +1656,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, public static ConditionalTask createCondTask(HiveConf conf, Task currTask, MoveWork mvWork, Serializable mergeWork, String inputPath) { + Utilities.LOG14535.info("Creating conditional merge task for " + inputPath); // There are 3 options for this ConditionalTask: // 1) Merge the partitions @@ -1642,10 +1664,14 @@ public static ConditionalTask createCondTask(HiveConf conf, // 3) Merge some partitions and move other partitions (i.e. merge some partitions and don't // merge others) in this case the merge is done first followed by the move to prevent // conflicts. 
+ // TODO: if we are not dealing with concatenate DDL, we should not create a merge+move path + // because it should be impossible to get incompatible outputs. + // Create a dummy task if no move is needed. + Serializable moveWork = mvWork != null ? mvWork : new DependencyCollectionWork(); Task<? extends Serializable> mergeOnlyMergeTask = TaskFactory.get(mergeWork, conf); - Task<? extends Serializable> moveOnlyMoveTask = TaskFactory.get(mvWork, conf); + Task<? extends Serializable> moveOnlyMoveTask = TaskFactory.get(moveWork, conf); Task<? extends Serializable> mergeAndMoveMergeTask = TaskFactory.get(mergeWork, conf); - Task<? extends Serializable> mergeAndMoveMoveTask = TaskFactory.get(mvWork, conf); + Task<? extends Serializable> mergeAndMoveMoveTask = TaskFactory.get(moveWork, conf); // NOTE! It is necessary merge task is the parent of the move task, and not // the other way around, for the proper execution of the execute method of @@ -1653,7 +1679,7 @@ public static ConditionalTask createCondTask(HiveConf conf, mergeAndMoveMergeTask.addDependentTask(mergeAndMoveMoveTask); List<Serializable> listWorks = new ArrayList<Serializable>(); - listWorks.add(mvWork); + listWorks.add(moveWork); listWorks.add(mergeWork); ConditionalWork cndWork = new ConditionalWork(listWorks); @@ -1689,8 +1715,8 @@ public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { .isSkewedStoredAsDir(); } - public static Task<MoveWork> findMoveTask( - List<Task<MoveWork>> mvTasks, FileSinkOperator fsOp) { + public static Task<MoveWork> findMoveTaskForFsopOutput( + List<Task<MoveWork>> mvTasks, Path fsopFinalDir) { // find the move task for (Task<MoveWork> mvTsk : mvTasks) { MoveWork mvWork = mvTsk.getWork(); @@ -1700,9 +1726,10 @@ public static Task<MoveWork> findMoveTask( } else if (mvWork.getLoadTableWork() != null) { srcDir = mvWork.getLoadTableWork().getSourcePath(); } + Utilities.LOG14535.info("Observing MoveWork " + System.identityHashCode(mvWork) + + " with " + srcDir + " while looking for " + fsopFinalDir); - if ((srcDir != null) - && (srcDir.equals(fsOp.getConf().getFinalDirName()))) { + if ((srcDir != null) && srcDir.equals(fsopFinalDir)) { return mvTsk; } } @@ -1722,59 +1749,58 @@ public static boolean isMergeRequired(List<Task<MoveWork>> mvTasks, HiveConf hco Task<? extends Serializable> currTask, boolean isInsertTable) { // Has the user enabled merging of files for map-only jobs or for all jobs - if ((mvTasks != null) && (!mvTasks.isEmpty())) { - - // no need of merging if the move is to a local file system - MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTask(mvTasks, fsOp); - - if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) - && !fsOp.getConf().isMaterialization()) { - // mark the MapredWork and FileSinkOperator for gathering stats - fsOp.getConf().setGatherStats(true); - fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); - if (!mvTask.hasFollowingStatsTask()) { - GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf); - } + if (mvTasks == null || mvTasks.isEmpty()) return false; + + // no need of merging if the move is to a local file system + // We are looking based on the original FSOP, so use the original path as is. + MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTaskForFsopOutput( + mvTasks, fsOp.getConf().getFinalDirName()); + + // TODO: the stats-gathering setup below is unrelated to the merge check; consider moving it out of this method.
+ if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + && !fsOp.getConf().isMaterialization()) { + // mark the MapredWork and FileSinkOperator for gathering stats + fsOp.getConf().setGatherStats(true); + fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); + if (!mvTask.hasFollowingStatsTask()) { + GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf); } + } - if ((mvTask != null) && !mvTask.isLocal() && fsOp.getConf().canBeMerged()) { + if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) return false; - if (currTask.getWork() instanceof TezWork) { - // tez blurs the boundary between map and reduce, thus it has it's own - // config - return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); - } else if (currTask.getWork() instanceof SparkWork) { - // spark has its own config for merging - return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES); - } + if (currTask.getWork() instanceof TezWork) { + // tez blurs the boundary between map and reduce, thus it has its own config + return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); + } else if (currTask.getWork() instanceof SparkWork) { + // spark has its own config for merging + return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES); + } + return isMergeRequiredForMr(hconf, fsOp, currTask); + } - if (fsOp.getConf().isLinkedFileSink()) { - // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the - // number of reducers are few, so the number of files anyway are small. - // However, with this optimization, we are increasing the number of files - // possibly by a big margin. So, merge aggresively. + private static boolean isMergeRequiredForMr(HiveConf hconf, + FileSinkOperator fsOp, Task<? extends Serializable> currTask) { + if (fsOp.getConf().isLinkedFileSink()) { + // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the + // number of reducers are few, so the number of files anyway are small. + // However, with this optimization, we are increasing the number of files + // possibly by a big margin. So, merge aggressively.
+ return (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)); + } + // There are separate configuration parameters to control whether to + // merge for a map-only job + // or for a map-reduce job + if (currTask.getWork() instanceof MapredWork) { + ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); + boolean mergeMapOnly = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; + boolean mergeMapRed = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && + reduceWork != null; + if (mergeMapOnly || mergeMapRed) { + return true; } } return false; @@ -1798,36 +1824,38 @@ public static Path createMoveTask(Task currTask, boolean Path dest = null; + FileSinkDesc fileSinkDesc = fsOp.getConf(); + boolean isMmTable = fileSinkDesc.isMmTable(); if (chDir) { - FileSinkDesc fileSinkDesc = fsOp.getConf(); - dest = fileSinkDesc.getFinalDirName(); - - // generate the temporary file - // it must be on the same file system as the current destination - Context baseCtx = parseCtx.getContext(); - - // Create the required temporary file in the HDFS location if the destination - // path of the FileSinkOperator table is a blobstore path. - // TODO# special case #N - linked FDs (unions?) - Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); - - // Change all the linked file sink descriptors - if (fileSinkDesc.isLinkedFileSink()) { - for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { - fsConf.setParentDir(tmpDir); - fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); - Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; new parent " + tmpDir + ", dest was " + fileSinkDesc.getDestPath()); + + dest = fileSinkDesc.getMergeInputDirName(); + if (!isMmTable) { + // generate the temporary file + // it must be on the same file system as the current destination + Context baseCtx = parseCtx.getContext(); + + // Create the required temporary file in the HDFS location if the destination + // path of the FileSinkOperator table is a blobstore path. 
+ Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); + + // Change all the linked file sink descriptors + if (fileSinkDesc.isLinkedFileSink()) { + for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { + fsConf.setParentDir(tmpDir); + fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); + Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; new parent " + tmpDir + ", dest was " + fileSinkDesc.getDestPath()); + } + } else { + fileSinkDesc.setDirName(tmpDir); + Utilities.LOG14535.info("createMoveTask setting tmpDir chDir " + tmpDir + "; dest was " + fileSinkDesc.getDestPath()); } - } else { - fileSinkDesc.setDirName(tmpDir); - Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + tmpDir + "; dest was " + fileSinkDesc.getDestPath()); } } Task mvTask = null; if (!chDir) { - mvTask = GenMapRedUtils.findMoveTask(mvTasks, fsOp); + mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsOp.getConf().getFinalDirName()); } // Set the move task to be dependent on the current task diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 5348500e3bc4..03c2e79815c9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1586,6 +1586,10 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, try { tblObj = getTable(tableName); + // TODO: we should probably block all ACID tables here. + if (MetaStoreUtils.isMmTable(tblObj.getParameters())) { + throw new SemanticException("Merge is not supported for MM tables"); + } List bucketCols = null; Class inputFormatClass = null; @@ -1676,9 +1680,8 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ? new HashMap() : partSpec); ltd.setLbCtx(lbCtx); - // TODO# movetask is created here; handle MM tables - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); + // No need to handle MM tables - unsupported path. + Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mergeTask.addDependentTask(moveTsk); if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 73cc95a1294c..5c67fe2aee87 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -304,7 +304,6 @@ public static void removeUnionOperators(GenTezProcContext context, BaseWork work linked = context.linkedFileSinks.get(path); linked.add(desc); - // TODO# special case #N - unions (tez) desc.setDirName(new Path(path, "" + linked.size())); Utilities.LOG14535.info("removing union - new desc with " + desc.getDirName() + "; parent " + path); desc.setLinkedFileSink(true); @@ -374,8 +373,7 @@ public static void processFileSink(GenTezProcContext context, FileSinkOperator f // If underlying data is RCFile or OrcFile, RCFileBlockMerge task or // OrcFileStripeMerge task would be created. 
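
The concatenate guard above keys off the MM marker property ('hivecommit', the same tblproperties flag the tests below use). A minimal standalone rendering of the check, assuming MetaStoreUtils.isMmTable reduces to a boolean table-property lookup, with a stock exception so the snippet compiles on its own:

import java.util.Map;

final class MmMergeGuardSketch {
  static void checkMergeAllowed(Map<String, String> tableParams) {
    // MM tables manage files per write ID, so file-level concatenation is rejected.
    if (tableParams != null && Boolean.parseBoolean(tableParams.get("hivecommit"))) {
      throw new UnsupportedOperationException("Merge is not supported for MM tables");
    }
  }
}
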
LOG.info("using CombineHiveInputformat for the merge job"); - Utilities.LOG14535.info("merging files from " + fileSink.getConf().getDirName() + " to " + finalName); - // TODO# special case #N - merge + Utilities.LOG14535.info("will generate MR work for merging files from " + fileSink.getConf().getDirName() + " to " + finalName); GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, context.dependencyTask, context.moveTask, hconf, context.currentTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index ede1bda2f16c..66e2d27168fc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6575,7 +6575,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else { queryTmpdir = ctx.getTempDirForPath(dest_path); } - Utilities.LOG14535.info("createFS for table specifying " + queryTmpdir + " from " + dest_path); + Utilities.LOG14535.info("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + dest_path); if (dpCtx != null) { // set the root of the temporary path where dynamic partition columns will populate dpCtx.setRootPath(queryTmpdir); @@ -6644,7 +6644,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters()); queryTmpdir = isMmTable ? dest_path : ctx.getTempDirForPath(dest_path); - Utilities.LOG14535.info("createFS for partition specifying " + queryTmpdir + " from " + dest_path); + Utilities.LOG14535.info("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + dest_path); table_desc = Utilities.getTableDesc(dest_tab); // Add sorting/bucketing if needed diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index ffc9c3e5d97c..4635f185f123 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -75,14 +75,6 @@ public String getDir() { return dir; } - /** - * @param dir - * the dir to set - */ - public void setDir(String dir) { - this.dir = dir; - } - /** * @return the listTasks */ @@ -121,8 +113,7 @@ public void setLbCtx(ListBucketingCtx lbCtx) { } } - public List> getTasks(HiveConf conf, - Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverMergeFilesCtx ctx = (ConditionalResolverMergeFilesCtx) objCtx; String dirName = ctx.getDir(); @@ -179,6 +170,8 @@ public List> getTasks(HiveConf conf, if(lbLevel == 0) { // static partition without list bucketing long totalSz = getMergeSize(inpFs, dirPath, avgConditionSize); + Utilities.LOG14535.info("merge resolve simple case - totalSz " + totalSz + " from " + dirPath); + if (totalSz >= 0) { // add the merge job setupMapRedWork(conf, work, trgtSize, totalSz); resTsks.add(mrTask); @@ -192,6 +185,7 @@ public List> getTasks(HiveConf conf, } } } else { + Utilities.LOG14535.info("Resolver returning movetask for " + dirPath); resTsks.add(mvTask); } } catch (IOException e) { @@ -234,6 +228,7 @@ private void generateActualTasks(HiveConf conf, List mrTask, Task mrAndMvTask, Path dirPath, FileSystem inpFs, ConditionalResolverMergeFilesCtx ctx, MapWork work, int dpLbLevel) throws IOException { + Utilities.LOG14535.info("generateActualTasks for " + dirPath); 
DynamicPartitionCtx dpCtx = ctx.getDPCtx(); // get list of dynamic partitions FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(dirPath, dpLbLevel, inpFs); @@ -281,6 +276,7 @@ private void generateActualTasks(HiveConf conf, List 0) { + // Note: this path should be specific to concatenate; never executed in a select query. // modify the existing move task as it is already in the candidate running tasks // running the MoveTask and MR task in parallel may @@ -362,6 +358,7 @@ private AverageSize getAverageSize(FileSystem inpFs, Path dirPath) { long totalSz = 0; int numFiles = 0; for (FileStatus fStat : fStats) { + Utilities.LOG14535.info("Resolver looking at " + fStat.getPath()); if (fStat.isDir()) { AverageSize avgSzDir = getAverageSize(inpFs, fStat.getPath()); if (avgSzDir.getTotalSize() < 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java index 7ec1bdd5e50c..615c63de1423 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java @@ -28,6 +28,7 @@ public class FileMergeDesc extends AbstractOperatorDesc { private int listBucketingDepth; private boolean hasDynamicPartitions; private boolean isListBucketingAlterTableConcatenate; + private Long mmWriteId; public FileMergeDesc(DynamicPartitionCtx dynPartCtx, Path outputDir) { this.dpCtx = dynPartCtx; @@ -73,4 +74,12 @@ public boolean isListBucketingAlterTableConcatenate() { public void setListBucketingAlterTableConcatenate(boolean isListBucketingAlterTableConcatenate) { this.isListBucketingAlterTableConcatenate = isListBucketingAlterTableConcatenate; } + + public Long getMmWriteId() { + return mmWriteId; + } + + public void setMmWriteId(Long mmWriteId) { + this.mmWriteId = mmWriteId; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index def1c5f7adb6..8bef7a9305b0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -182,6 +183,13 @@ public Path getFinalDirName() { return linkedFileSink ? parentDir : dirName; } + /** getFinalDirName that takes into account MM, but not DP, LB or buckets. 
*/ + public Path getMergeInputDirName() { + Path root = getFinalDirName(); + if (mmWriteId == null) return root; + return new Path(root, ValidWriteIds.getMmFilePrefix(mmWriteId)); + } + @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public TableDesc getTableInfo() { return tableInfo; @@ -255,7 +263,7 @@ public boolean isMmTable() { return mmWriteId != null; } - public long getMmWriteId() { + public Long getMmWriteId() { return mmWriteId; } @@ -485,6 +493,10 @@ public void setStatsTmpDir(String statsCollectionTempDir) { this.statsTmpDir = statsCollectionTempDir; } + public void setMmWriteId(Long mmWriteId) { + this.mmWriteId = mmWriteId; + } + public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization { public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index 9f498c7fb88a..f0b2775579b3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -23,6 +23,7 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -59,7 +60,7 @@ public class MoveWork implements Serializable { public MoveWork() { } - public MoveWork(HashSet inputs, HashSet outputs) { + private MoveWork(HashSet inputs, HashSet outputs) { this.inputs = inputs; this.outputs = outputs; } @@ -68,6 +69,8 @@ public MoveWork(HashSet inputs, HashSet outputs, final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, boolean checkFileFormat, boolean srcLocal) { this(inputs, outputs); + Utilities.LOG14535.info("Creating MoveWork " + System.identityHashCode(this) + + " with " + loadTableWork + "; " + loadFileWork); this.loadTableWork = loadTableWork; this.loadFileWork = loadFileWork; this.checkFileFormat = checkFileFormat; @@ -77,10 +80,7 @@ public MoveWork(HashSet inputs, HashSet outputs, public MoveWork(HashSet inputs, HashSet outputs, final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, boolean checkFileFormat) { - this(inputs, outputs); - this.loadTableWork = loadTableWork; - this.loadFileWork = loadFileWork; - this.checkFileFormat = checkFileFormat; + this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false); } @Explain(displayName = "tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index 1f85c483de1b..8ce42a2cf698 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -131,6 +131,42 @@ drop table skew_dp_union_mm; +set hive.merge.orcfile.stripe.level=true; +set hive.merge.tezfiles=true; +set hive.merge.mapfiles=true; +set hive.merge.mapredfiles=true; + + +create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true'); + +insert into table merge0_mm select key from intermediate; +select * from merge0_mm; + +set tez.grouping.split-count=1; +insert into table merge0_mm select key from intermediate; +set tez.grouping.split-count=0; +select * from merge0_mm; + +drop table merge0_mm; + + +create table merge1_mm (id int) partitioned by (key int) stored as orc 
tblproperties('hivecommit'='true'); + +insert into table merge1_mm partition (key) select key, key from intermediate; +select * from merge1_mm; + +set tez.grouping.split-count=1; +insert into table merge1_mm partition (key) select key, key from intermediate; +set tez.grouping.split-count=0; +select * from merge1_mm; + +drop table merge1_mm; + + +-- TODO: need to include merge+union, but it's broken for now + + + @@ -140,31 +176,14 @@ drop table skew_dp_union_mm; ---drop table merge_mm; + + --drop table ctas_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- -- ---set hive.merge.mapredfiles=true; ---set hive.merge.sparkfiles=true; ---set hive.merge.tezfiles=true; --- ---CREATE TABLE merge_mm (key INT, value STRING) --- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); --- ---EXPLAIN ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- --- --- ---- TODO load, multi-insert etc -- -- diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index ceb7a1ae26d4..f423b00af704 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -8,45 +8,17 @@ set hive.tez.auto.reducer.parallelism=false; drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; -insert into table intermediate partition(p='455') select key from src limit 2; -insert into table intermediate partition(p='456') select key from src limit 2; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -set hive.optimize.skewjoin.compiletime = true; +set hive.merge.orcfile.stripe.level=true; +set hive.merge.tezfiles=true; +set hive.merge.mapfiles=true; +set hive.merge.mapredfiles=true; -create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='false'); -insert into table skew_mm -select key, key, key from intermediate; -drop table skew_mm; - - -create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='true'); - -insert into table skew_mm -select key, key, key from intermediate; - -select * from skew_mm; -drop table skew_mm; - - - - - -create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) -skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true'); - -insert into table skew_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate -union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate; - - -select * from skew_mm; -drop table skew_mm; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index b70ae3c592bf..f8001c2bbc04 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -153,12 +153,12 @@ POSTHOOK: Input: default@part_mm@key_mm=456 10 456 10 455 10 455 -97 456 97 455 +97 456 
97 455 98 455 -98 455 98 456 +98 455 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -735,37 +735,195 @@ POSTHOOK: query: drop table skew_dp_union_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@skew_dp_union_mm POSTHOOK: Output: default@skew_dp_union_mm -PREHOOK: query: -- future +PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge0_mm +PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@merge0_mm +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +98 +97 +0 +10 +PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@merge0_mm +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +98 +97 +0 +10 +98 +97 +0 +10 +PREHOOK: query: drop table merge0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge0_mm +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: drop table merge0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge0_mm +POSTHOOK: Output: default@merge0_mm +PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge1_mm +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: 
default@intermediate@p=456 +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@merge1_mm@key=0 +POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=97 +POSTHOOK: Output: default@merge1_mm@key=98 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge1_mm +PREHOOK: Input: default@merge1_mm@key=0 +PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=97 +PREHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from merge1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Input: default@merge1_mm@key=0 +POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=97 +POSTHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +97 97 +98 98 +0 0 +10 10 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@merge1_mm@key=0 +POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=97 +POSTHOOK: Output: default@merge1_mm@key=98 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge1_mm +PREHOOK: Input: default@merge1_mm@key=0 +PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=97 +PREHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from merge1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Input: default@merge1_mm@key=0 +POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=97 +POSTHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +97 97 +97 97 +98 98 +98 98 +0 0 +0 0 
+10 10 +10 10 +PREHOOK: query: drop table merge1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge1_mm +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: drop table merge1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Output: default@merge1_mm +PREHOOK: query: -- TODO: need to include merge+union, but it's broken for now + + + + + + +-- future + + ---drop table merge_mm; --drop table ctas_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- -- ---set hive.merge.mapredfiles=true; ---set hive.merge.sparkfiles=true; ---set hive.merge.tezfiles=true; --- ---CREATE TABLE merge_mm (key INT, value STRING) --- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); --- ---EXPLAIN ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- --- --- ---- TODO load, multi-insert etc -- -- @@ -774,37 +932,27 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- future +POSTHOOK: query: -- TODO: need to include merge+union, but it's broken for now + + + + + + +-- future + + ---drop table merge_mm; --drop table ctas_mm; -- -- --create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; -- -- ---set hive.merge.mapredfiles=true; ---set hive.merge.sparkfiles=true; ---set hive.merge.tezfiles=true; --- ---CREATE TABLE merge_mm (key INT, value STRING) --- PARTITIONED BY (ds STRING, part STRING) STORED AS ORC tblproperties ('hivecommit'='true'); --- ---EXPLAIN ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- ---INSERT OVERWRITE TABLE merge_mm PARTITION (ds='123', part) --- SELECT key, value, PMOD(HASH(key), 2) as part --- FROM src; --- --- --- ---- TODO load, multi-insert etc -- -- diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index d6d31ea24bc6..4d28c63ed78e 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -10,181 +10,24 @@ POSTHOOK: query: create table intermediate(key int) partitioned by (p int) store POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@intermediate -PREHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@intermediate@p=455 -POSTHOOK: query: insert into table intermediate partition(p='455') select key from src limit 2 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=455 POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +PREHOOK: query: insert into table intermediate 
partition(p='456') select distinct key from src where key is not null order by key asc limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@intermediate@p=456 -POSTHOOK: query: insert into table intermediate partition(p='456') select key from src limit 2 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='false') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@skew_mm -POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='false') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@skew_mm -PREHOOK: query: insert into table skew_mm -select key, key, key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@skew_mm -POSTHOOK: query: insert into table skew_mm -select key, key, key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@skew_mm -POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: drop table skew_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@skew_mm -PREHOOK: Output: default@skew_mm -POSTHOOK: query: drop table skew_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@skew_mm -POSTHOOK: Output: default@skew_mm -PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@skew_mm -POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) - stored as directories tblproperties ('hivecommit'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@skew_mm -PREHOOK: query: insert into table skew_mm -select key, key, key from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@skew_mm -POSTHOOK: query: insert into table skew_mm -select key, key, key from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@skew_mm -POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: 
skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from skew_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@skew_mm -#### A masked pattern was here #### -POSTHOOK: query: select * from skew_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@skew_mm -#### A masked pattern was here #### -455 455 455 -455 455 455 -0 0 0 -0 0 0 -PREHOOK: query: drop table skew_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@skew_mm -PREHOOK: Output: default@skew_mm -POSTHOOK: query: drop table skew_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@skew_mm -POSTHOOK: Output: default@skew_mm -PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) -skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@skew_mm -POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) -skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@skew_mm -PREHOOK: query: insert into table skew_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate -union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@skew_mm -POSTHOOK: query: insert into table skew_mm partition (k3) -select key as i, key as j, key as k, key as l from intermediate -union all -select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@skew_mm@k3=0 -POSTHOOK: Output: default@skew_mm@k3=4 -POSTHOOK: Output: default@skew_mm@k3=455 -POSTHOOK: Output: default@skew_mm@k3=459 -POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=455).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=455).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=455).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=459).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm 
PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: skew_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from skew_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@skew_mm -PREHOOK: Input: default@skew_mm@k3=0 -PREHOOK: Input: default@skew_mm@k3=4 -PREHOOK: Input: default@skew_mm@k3=455 -PREHOOK: Input: default@skew_mm@k3=459 -#### A masked pattern was here #### -POSTHOOK: query: select * from skew_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@skew_mm -POSTHOOK: Input: default@skew_mm@k3=0 -POSTHOOK: Input: default@skew_mm@k3=4 -POSTHOOK: Input: default@skew_mm@k3=455 -POSTHOOK: Input: default@skew_mm@k3=459 -#### A masked pattern was here #### -0 0 0 0 -0 0 0 0 -1 2 3 4 -1 2 3 4 -455 455 455 455 -455 455 455 455 -456 457 458 459 -456 457 458 459 -PREHOOK: query: drop table skew_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@skew_mm -PREHOOK: Output: default@skew_mm -POSTHOOK: query: drop table skew_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@skew_mm -POSTHOOK: Output: default@skew_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From af4ff3787d648a9f4c80b5446d6bcd80b1efc69e Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 17 Oct 2016 12:33:31 -0700 Subject: [PATCH 10/24] HIVE-14643 : handle ctas for the MM tables (Sergey Shelukhin) --- .../apache/hadoop/hive/ql/exec/DDLTask.java | 12 +- .../hadoop/hive/ql/exec/FileSinkOperator.java | 52 +++-- .../apache/hadoop/hive/ql/exec/Utilities.java | 74 ++++--- .../apache/hadoop/hive/ql/metadata/Hive.java | 2 +- .../hive/ql/optimizer/GenMapRedUtils.java | 5 +- .../optimizer/unionproc/UnionProcFactory.java | 1 - .../hadoop/hive/ql/parse/GenTezUtils.java | 1 - .../hive/ql/parse/SemanticAnalyzer.java | 182 ++++++++++-------- .../hadoop/hive/ql/parse/TaskCompiler.java | 144 ++++++++------ .../hadoop/hive/ql/plan/CreateTableDesc.java | 22 +++ .../hadoop/hive/ql/plan/FileSinkDesc.java | 10 +- .../apache/hadoop/hive/ql/plan/LoadDesc.java | 5 +- .../hadoop/hive/ql/plan/LoadFileDesc.java | 2 +- .../apache/hadoop/hive/ql/plan/MoveWork.java | 2 +- ql/src/test/queries/clientpositive/mm_all.q | 30 ++- .../test/queries/clientpositive/mm_current.q | 10 +- .../results/clientpositive/llap/mm_all.q.out | 138 ++++++++----- .../clientpositive/llap/mm_current.q.out | 42 ++++ 18 files changed, 463 insertions(+), 271 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index acf570f6db68..bb9eaf5254aa 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -4059,8 +4059,18 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { } else { db.createTable(tbl, crtTbl.getIfNotExists()); } - if ( crtTbl.isCTAS()) { + if (crtTbl.isCTAS()) { Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName()); + if (crtTbl.getInitialWriteId() != null) { + // TODO# this would be retrieved via ACID before the query runs; for now we rely on it + // being zero at start; we can't create a write ID before we create the table here. 
+ long initialWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); + if (initialWriteId != crtTbl.getInitialWriteId()) { + throw new HiveException("Initial write ID mismatch - expected " + + crtTbl.getInitialWriteId() + " but got " + initialWriteId); + } + db.commitMmTableWrite(tbl, initialWriteId); + } DataContainer dc = new DataContainer(createdTable.getTTable()); SessionState.get().getLineageState().setLineage( createdTable.getPath(), dc, createdTable.getCols() diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index dda4b5155eb6..ef6473af9cf7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -112,7 +112,8 @@ public class FileSinkOperator extends TerminalOperator implements protected transient Path parent; protected transient HiveOutputFormat hiveOutputFormat; protected transient Path specPath; - protected transient String childSpecPathDynLinkedPartitions; + protected transient String unionPath; + protected transient boolean isUnionDp; protected transient int dpStartCol; // start column # for DP columns protected transient List dpVals; // array of values corresponding to DP columns protected transient List dpWritables; @@ -304,7 +305,12 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT } outPaths[filesIdx] = getTaskOutPath(taskId); } else { - String subdirPath = ValidWriteIds.getMmFilePrefix(conf.getMmWriteId()) + "/" + taskId; + String subdirPath = ValidWriteIds.getMmFilePrefix(conf.getMmWriteId()); + if (unionPath != null) { + // Create the union directory inside the MM directory. + subdirPath += Path.SEPARATOR + unionPath; + } + subdirPath += Path.SEPARATOR + taskId; if (!bDynParts && !isSkewedStoredAsSubDirectories) { finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension); } else { @@ -369,7 +375,6 @@ public Path getTaskOutputTempPath() { protected boolean filesCreated = false; private void initializeSpecPath() { - // TODO# special case #N // For a query of the type: // insert overwrite table T1 // select * from (subq1 union all subq2)u; @@ -383,18 +388,25 @@ private void initializeSpecPath() { // and Parent/DynamicPartition/Child_1 respectively. // The movetask that follows subQ1 and subQ2 tasks still moves the directory // 'Parent' - if ((!conf.isLinkedFileSink()) || (dpCtx == null)) { + boolean isLinked = conf.isLinkedFileSink(); + if (!isLinked) { + // Simple case - no union. specPath = conf.getDirName(); - Utilities.LOG14535.info("Setting up FSOP " + System.identityHashCode(this) + " (" - + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath); - childSpecPathDynLinkedPartitions = null; - return; + unionPath = null; + } else { + isUnionDp = (dpCtx != null); + if (conf.isMmTable() || isUnionDp) { + // MM tables need custom handling for union suffix; DP tables use parent too. + specPath = conf.getParentDir(); + unionPath = conf.getDirName().getName(); + } else { + // For now, keep the old logic for non-MM non-DP union case. Should probably be unified. 
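
The layout the MM branch above produces is worth spelling out: files land under the final table or partition directory, in a write-id directory, with any union branch directory nested inside it. A sketch (the "mm_" prefix again stands in for ValidWriteIds.getMmFilePrefix, and union branches are named "1", "2", ... as in removeUnionOperators):

import org.apache.hadoop.fs.Path;

final class MmPathSketch {
  static Path mmOutPath(Path specPath, long mmWriteId, String unionSubdir, String taskId) {
    String subdir = "mm_" + mmWriteId; // assumed prefix format
    if (unionSubdir != null) {
      subdir += Path.SEPARATOR + unionSubdir; // union branch dir nests inside the MM dir
    }
    return new Path(specPath, subdir + Path.SEPARATOR + taskId);
  }
}
// e.g. mmOutPath(new Path("/wh/t/p=1"), 5, "1", "000000_0") -> /wh/t/p=1/mm_5/1/000000_0
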
+ specPath = conf.getDirName(); + unionPath = null; + } } - - specPath = conf.getParentDir(); - childSpecPathDynLinkedPartitions = conf.getDirName().getName(); Utilities.LOG14535.info("Setting up FSOP " + System.identityHashCode(this) + " (" - + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath); + + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath + " + " + unionPath); } /** Kryo ctor. */ @@ -903,9 +915,9 @@ protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveExceptio * @throws HiveException */ private FSPaths createNewPaths(String dirName) throws HiveException { - FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); // TODO# this will break - fsp2.configureDynPartPath(dirName, childSpecPathDynLinkedPartitions); - Utilities.LOG14535.info("creating new paths for " + dirName + ", childSpec " + childSpecPathDynLinkedPartitions + FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); + fsp2.configureDynPartPath(dirName, !conf.isMmTable() && isUnionDp ? unionPath : null); + Utilities.LOG14535.info("creating new paths for " + dirName + ", childSpec " + unionPath + ": tmpPath " + fsp2.getTmpPath() + ", task path " + fsp2.getTaskOutputTempPath()/*, new Exception()*/); if(!conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { createBucketFiles(fsp2); @@ -1129,8 +1141,8 @@ public void closeOp(boolean abort) throws HiveException { } } if (conf.getMmWriteId() != null) { - Utilities.writeMmCommitManifest(commitPaths, specPath, fs, taskId, conf.getMmWriteId(), - childSpecPathDynLinkedPartitions); + Utilities.writeMmCommitManifest( + commitPaths, specPath, fs, taskId, conf.getMmWriteId(), unionPath); } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { @@ -1170,16 +1182,16 @@ public void jobCloseOp(Configuration hconf, boolean success) String unionSuffix = null; DynamicPartitionCtx dpCtx = conf.getDynPartCtx(); ListBucketingCtx lbCtx = conf.getLbCtx(); - if (conf.isLinkedFileSink() && (dpCtx != null)) { + if (conf.isLinkedFileSink() && (dpCtx != null || conf.isMmTable())) { specPath = conf.getParentDir(); - Utilities.LOG14535.info("Setting specPath to " + specPath + " for dynparts"); unionSuffix = conf.getDirName().getName(); } + Utilities.LOG14535.info("jobCloseOp using specPath " + specPath); if (!conf.isMmTable()) { Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); } else { int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), - lbLevels = lbCtx.calculateListBucketingLevel(); + lbLevels = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel(); // TODO: why is it stored in both? int numBuckets = (conf.getTable() != null) ? conf.getTable().getNumBuckets() : (dpCtx != null ? 
dpCtx.getNumBuckets() : 0); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 49bdd84ba7a2..accb237acb81 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -3773,26 +3773,23 @@ private static void tryDelete(FileSystem fs, Path path) { } } - private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, - int dpLevels, int lbLevels, String unionSuffix, PathFilter filter) throws IOException { + private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels, + int lbLevels, PathFilter filter, long mmWriteId) throws IOException { StringBuilder sb = new StringBuilder(path.toUri().getPath()); for (int i = 0; i < dpLevels + lbLevels; i++) { sb.append(Path.SEPARATOR).append("*"); } - if (unionSuffix != null) { - sb.append(Path.SEPARATOR).append(unionSuffix); - } - sb.append(Path.SEPARATOR).append("*"); // TODO: we could add exact mm prefix here + sb.append(Path.SEPARATOR).append(ValidWriteIds.getMmFilePrefix(mmWriteId)); Utilities.LOG14535.info("Looking for files via: " + sb.toString()); Path pathPattern = new Path(path, sb.toString()); return fs.globStatus(pathPattern, filter); } private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, - int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter) - throws IOException { + int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter, + long mmWriteId) throws IOException { FileStatus[] files = getMmDirectoryCandidates( - fs, specPath, dpLevels, lbLevels, unionSuffix, filter); + fs, specPath, dpLevels, lbLevels, filter, mmWriteId); if (files != null) { for (FileStatus status : files) { Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); @@ -3854,7 +3851,8 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); if (!success) { - tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, unionSuffix, filter); + tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, + unionSuffix, filter, mmWriteId); return; } FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); @@ -3871,8 +3869,9 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } Utilities.LOG14535.info("Looking for files in: " + specPath); - files = getMmDirectoryCandidates(fs, specPath, dpLevels, lbLevels, unionSuffix, filter); - ArrayList results = new ArrayList<>(); + files = getMmDirectoryCandidates( + fs, specPath, dpLevels, lbLevels, filter, mmWriteId); + ArrayList mmDirectories = new ArrayList<>(); if (files != null) { for (FileStatus status : files) { Path path = status.getPath(); @@ -3883,7 +3882,7 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con tryDelete(fs, path); } } else { - results.add(status); + mmDirectories.add(status); } } } @@ -3901,16 +3900,8 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } } - for (FileStatus status : results) { - for (FileStatus child : fs.listStatus(status.getPath())) { - Path childPath = child.getPath(); - if (committed.remove(childPath.toString())) continue; // A good file. 
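
The candidate search above replaces directory recursion with a single glob: one "*" per dynamic-partition or list-bucketing level, terminated by the write-id prefix, so one globStatus call finds every MM directory of the write. A standalone rendering (prefix format assumed, as before):

import org.apache.hadoop.fs.Path;

final class MmGlobSketch {
  // For root=/wh/t, dpLevels=2, lbLevels=0, mmWriteId=5 this yields /wh/t/*/*/mm_5.
  static Path candidateGlob(Path root, int dpLevels, int lbLevels, long mmWriteId) {
    StringBuilder sb = new StringBuilder(root.toUri().getPath());
    for (int i = 0; i < dpLevels + lbLevels; i++) {
      sb.append(Path.SEPARATOR).append("*"); // wildcard per partition/bucket level
    }
    sb.append(Path.SEPARATOR).append("mm_").append(mmWriteId);
    return new Path(sb.toString());
  }
}
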
- Utilities.LOG14535.info("Deleting " + childPath + " that was not committed"); - // We should actually succeed here - if we fail, don't commit the query. - if (!fs.delete(childPath, true)) { - throw new HiveException("Failed to delete an uncommitted path " + childPath); - } - } + for (FileStatus status : mmDirectories) { + cleanMmDirectory(status.getPath(), fs, unionSuffix, committed); } if (!committed.isEmpty()) { @@ -3930,12 +3921,12 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } } - if (results.isEmpty()) return; + if (mmDirectories.isEmpty()) return; // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing, // so maintain parity here by not calling it at all. if (lbLevels != 0) return; - FileStatus[] finalResults = results.toArray(new FileStatus[results.size()]); + FileStatus[] finalResults = mmDirectories.toArray(new FileStatus[mmDirectories.size()]); List emptyBuckets = Utilities.removeTempOrDuplicateFiles( fs, finalResults, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf); // create empty buckets if necessary @@ -3945,4 +3936,37 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } } + private static void cleanMmDirectory(Path dir, FileSystem fs, + String unionSuffix, HashSet committed) throws IOException, HiveException { + for (FileStatus child : fs.listStatus(dir)) { + Path childPath = child.getPath(); + if (unionSuffix == null) { + if (committed.remove(childPath.toString())) continue; // A good file. + deleteUncommitedFile(childPath, fs); + } else if (!child.isDirectory()) { + if (childPath.getName().endsWith(MANIFEST_EXTENSION)) continue; + if (committed.contains(childPath.toString())) { + throw new HiveException("Union FSOP has commited " + + childPath + " outside of union directory" + unionSuffix); + } + deleteUncommitedFile(childPath, fs); + } else if (childPath.getName().equals(unionSuffix)) { + // Found the right union directory; treat it as "our" MM directory. + cleanMmDirectory(childPath, fs, null, committed); + } else { + Utilities.LOG14535.info("FSOP for " + unionSuffix + + " is ignoring the other side of the union " + childPath.getName()); + } + } + } + + private static void deleteUncommitedFile(Path childPath, FileSystem fs) + throws IOException, HiveException { + Utilities.LOG14535.info("Deleting " + childPath + " that was not committed"); + // We should actually succeed here - if we fail, don't commit the query. 
+ if (!fs.delete(childPath, true)) { + throw new HiveException("Failed to delete an uncommitted path " + childPath); + } + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 9a1c1fa20c08..8da9a8055218 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1515,7 +1515,7 @@ public void loadSinglePartition(Path loadPath, String tableName, } - private void commitMmTableWrite(Table tbl, Long mmWriteId) + public void commitMmTableWrite(Table tbl, Long mmWriteId) throws HiveException { try { getMSC().finalizeTableWrite(tbl.getDbName(), tbl.getTableName(), mmWriteId, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 79ef4d096a89..bd26854fa1e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1840,10 +1840,9 @@ public static Path createMoveTask(Task currTask, boolean // Change all the linked file sink descriptors if (fileSinkDesc.isLinkedFileSink()) { - for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { - fsConf.setParentDir(tmpDir); + for (FileSinkDesc fsConf : fileSinkDesc.getLinkedFileSinkDesc()) { fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); - Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; new parent " + tmpDir + ", dest was " + fileSinkDesc.getDestPath()); + Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; dest was " + fileSinkDesc.getDestPath()); } } else { fileSinkDesc.setDirName(tmpDir); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java index 7f7d19226aa9..3c3770937434 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java @@ -223,7 +223,6 @@ private void pushOperatorsAboveUnion(UnionOperator union, FileSinkDesc fileSinkDesc = (FileSinkDesc) fileSinkOp.getConf().clone(); fileSinkDesc.setDirName(new Path(parentDirName, parent.getIdentifier())); fileSinkDesc.setLinkedFileSink(true); - fileSinkDesc.setParentDir(parentDirName); Utilities.LOG14535.info("Created LinkedFileSink for union " + fileSinkDesc.getDirName() + "; parent " + parentDirName); parent.setChildOperators(null); Operator tmpFileSinkOp = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 5c67fe2aee87..e1da05c26cb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -307,7 +307,6 @@ public static void removeUnionOperators(GenTezProcContext context, BaseWork work desc.setDirName(new Path(path, "" + linked.size())); Utilities.LOG14535.info("removing union - new desc with " + desc.getDirName() + "; parent " + path); desc.setLinkedFileSink(true); - desc.setParentDir(path); desc.setLinkedFileSinkDesc(linked); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 
905c0008aeff..62faf8959e9f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -244,6 +244,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { + public static final String DUMMY_DATABASE = "_dummy_database"; public static final String DUMMY_TABLE = "_dummy_table"; public static final String SUBQUERY_TAG_1 = "-subquery1"; @@ -6532,7 +6533,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) LoadTableDesc ltd = null; ListBucketingCtx lbCtx = null; Map partSpec = null; - boolean isMmTable = false; + boolean isMmTable = false, isMmCtas = false; Long mmWriteId = null; switch (dest_type.intValue()) { @@ -6676,26 +6677,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) case QBMetaData.DEST_DFS_FILE: { dest_path = new Path(qbm.getDestFileForAlias(dest)); - if (isLocal) { - // for local directory - we always write to map-red intermediate - // store and then copy to local fs - queryTmpdir = ctx.getMRTmpPath(); - } else { - // otherwise write to the file system implied by the directory - // no copy is required. we may want to revisit this policy in future - - try { - Path qPath = FileUtils.makeQualified(dest_path, conf); - queryTmpdir = ctx.getTempDirForPath(qPath); - } catch (Exception e) { - throw new SemanticException("Error creating temporary folder on: " - + dest_path, e); - } - } - String cols = ""; - String colTypes = ""; - ArrayList colInfos = inputRR.getColumnInfos(); - // CTAS case: the file output format and serde are defined by the create // table command rather than taking the default value List field_schemas = null; @@ -6705,64 +6686,39 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) field_schemas = new ArrayList(); destTableIsTemporary = tblDesc.isTemporary(); destTableIsMaterialization = tblDesc.isMaterialization(); + if (MetaStoreUtils.isMmTable(tblDesc.getTblProps())) { + isMmTable = isMmCtas = true; + // TODO# this should really get current ACID txn; assuming ACID works correctly the txn + // should have been opened to create the ACID table. For now use the first ID. + mmWriteId = 0l; + tblDesc.setInitialWriteId(mmWriteId); + } } else if (viewDesc != null) { field_schemas = new ArrayList(); destTableIsTemporary = false; } - boolean first = true; - for (ColumnInfo colInfo : colInfos) { - String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); - - if (nm[1] != null) { // non-null column alias - colInfo.setAlias(nm[1]); - } - - String colName = colInfo.getInternalName(); //default column name - if (field_schemas != null) { - FieldSchema col = new FieldSchema(); - if (!("".equals(nm[0])) && nm[1] != null) { - colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` - } - colName = fixCtasColumnName(colName); - col.setName(colName); - String typeName = colInfo.getType().getTypeName(); - // CTAS should NOT create a VOID type - if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) { - throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE - .getMsg(colName)); - } - col.setType(typeName); - field_schemas.add(col); - } - - if (!first) { - cols = cols.concat(","); - colTypes = colTypes.concat(":"); - } - - first = false; - cols = cols.concat(colName); - - // Replace VOID type with string when the output is a temp table or - // local files. 
- // A VOID type can be generated under the query: - // - // select NULL from tt; - // or - // insert overwrite local directory "abc" select NULL from tt; - // - // where there is no column type to which the NULL value should be - // converted. - // - String tName = colInfo.getType().getTypeName(); - if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { - colTypes = colTypes.concat(serdeConstants.STRING_TYPE_NAME); - } else { - colTypes = colTypes.concat(tName); + if (isLocal) { + assert !isMmTable; + // for local directory - we always write to map-red intermediate + // store and then copy to local fs + queryTmpdir = ctx.getMRTmpPath(); + } else { + // otherwise write to the file system implied by the directory + // no copy is required. we may want to revisit this policy in future + try { + Path qPath = FileUtils.makeQualified(dest_path, conf); + queryTmpdir = isMmTable ? qPath : ctx.getTempDirForPath(qPath); + Utilities.LOG14535.info("Setting query directory " + queryTmpdir + " from " + dest_path + " (" + isMmTable + ")"); + } catch (Exception e) { + throw new SemanticException("Error creating temporary folder on: " + + dest_path, e); } } + ColsAndTypes ct = deriveFileSinkColTypes(inputRR, field_schemas); + String cols = ct.cols, colTypes = ct.colTypes; + // update the create table descriptor with the resulting schema. if (tblDesc != null) { tblDesc.setCols(new ArrayList<FieldSchema>(field_schemas)); @@ -6779,8 +6735,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE); - loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols, - colTypes)); + // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be used for stats. + loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, + queryTmpdir, dest_path, isDfsDir, cols, colTypes)); if (tblDesc == null) { if (viewDesc != null) { @@ -6866,6 +6823,10 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, canBeMerged, mmWriteId); + if (isMmCtas) { + // Add FSD so that the LoadTask compilation could fix up its path to avoid the move.
+ tableDesc.setWriter(fileSinkDesc); + } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( fileSinkDesc, fsRS, input), inputRR); @@ -6897,6 +6858,64 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) return output; } + private ColsAndTypes deriveFileSinkColTypes( + RowResolver inputRR, List field_schemas) throws SemanticException { + ColsAndTypes result = new ColsAndTypes("", ""); + ArrayList colInfos = inputRR.getColumnInfos(); + boolean first = true; + for (ColumnInfo colInfo : colInfos) { + String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); + + if (nm[1] != null) { // non-null column alias + colInfo.setAlias(nm[1]); + } + + String colName = colInfo.getInternalName(); //default column name + if (field_schemas != null) { + FieldSchema col = new FieldSchema(); + if (!("".equals(nm[0])) && nm[1] != null) { + colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` + } + colName = fixCtasColumnName(colName); + col.setName(colName); + String typeName = colInfo.getType().getTypeName(); + // CTAS should NOT create a VOID type + if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) { + throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE.getMsg(colName)); + } + col.setType(typeName); + field_schemas.add(col); + } + + if (!first) { + result.cols = result.cols.concat(","); + result.colTypes = result.colTypes.concat(":"); + } + + first = false; + result.cols = result.cols.concat(colName); + + // Replace VOID type with string when the output is a temp table or + // local files. + // A VOID type can be generated under the query: + // + // select NULL from tt; + // or + // insert overwrite local directory "abc" select NULL from tt; + // + // where there is no column type to which the NULL value should be + // converted. + // + String tName = colInfo.getType().getTypeName(); + if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { + result.colTypes = result.colTypes.concat(serdeConstants.STRING_TYPE_NAME); + } else { + result.colTypes = result.colTypes.concat(tName); + } + } + return result; + } + private static Long getMmWriteId(Table tbl, boolean isMmTable) throws HiveException { if (!isMmTable) return null; // Get the next write ID for this table. We will prefix files with this write ID. @@ -10145,7 +10164,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String if (partitions != null) { for (Partition partn : partitions) { // inputs.add(new ReadEntity(partn)); // is this needed at all? 
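For reference, the cols/colTypes strings that deriveFileSinkColTypes (above) hands to the sink and load descriptors are comma- and colon-separated encodings of the output schema. A minimal, self-contained sketch of that encoding, assuming a two-column schema like the intermediate(key int, p int) table used in the tests below:

    import java.util.Arrays;
    import java.util.List;

    public class ColTypeEncodingSketch {
      public static void main(String[] args) {
        // Illustrative schema only; mirrors the "c1,c2" / "t1:t2" shape built above.
        List<String> names = Arrays.asList("key", "p");
        List<String> types = Arrays.asList("int", "int");
        String cols = String.join(",", names);      // "key,p"
        String colTypes = String.join(":", types);  // "int:int"
        System.out.println(cols + " " + colTypes);
        // A void-typed column (e.g. from "select NULL ...") would be recorded as "string".
      }
    }
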
- LOG.info("XXX: adding part: "+partn); + LOG.info("XXX: adding part: "+partn); outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK)); } } @@ -11809,7 +11828,7 @@ ASTNode analyzeCreateTable( } } - if(location != null && location.length() != 0) { + if (location != null && location.length() != 0) { Path locPath = new Path(location); FileSystem curFs = null; FileStatus locStats = null; @@ -11818,7 +11837,7 @@ ASTNode analyzeCreateTable( if(curFs != null) { locStats = curFs.getFileStatus(locPath); } - if(locStats != null && locStats.isDir()) { + if (locStats != null && locStats.isDir()) { FileStatus[] lStats = curFs.listStatus(locPath); if(lStats != null && lStats.length != 0) { throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location)); @@ -11835,14 +11854,13 @@ ASTNode analyzeCreateTable( } tblProps = addDefaultProperties(tblProps); - tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, - skewedColNames, skewedValues, true, primaryKeys, foreignKeys); + skewedColNames, skewedValues, true, primaryKeys, foreignKeys); tableDesc.setMaterialization(isMaterialization); tableDesc.setStoredAsSubDirectories(storedAsDirs); tableDesc.setNullFormat(rowFormatParams.nullFormat); @@ -13177,4 +13195,12 @@ public void setLoadFileWork(List loadFileWork) { this.loadFileWork = loadFileWork; } + private static final class ColsAndTypes { + public ColsAndTypes(String cols, String colTypes) { + this.cols = cols; + this.colTypes = colTypes; + } + public String cols; + public String colTypes; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 363d41a9b1af..e1779250ba1b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -34,7 +34,9 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.HiveStatsUtils; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.Context; @@ -62,6 +64,7 @@ import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; @@ -205,7 +208,7 @@ public void compile(final ParseContext pCtx, final List tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mvTask.add(tsk); // Check to see if we are stale'ing any indexes and auto-update them if we want @@ -224,45 +227,15 @@ public void compile(final ParseContext pCtx, final List> leafTasks = new LinkedHashSet>(); - getLeafTasks(rootTasks, leafTasks); - if (isCStats) { - genColumnStatsTask(pCtx.getAnalyzeRewrite(), 
loadFileWork, leafTasks, outerQueryLimit, 0); - } else { - for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx - .getColumnStatsAutoGatherContexts()) { - if (!columnStatsAutoGatherContext.isInsertInto()) { - genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), - columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0); - } else { - int numBitVector; - try { - numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); - } catch (Exception e) { - throw new SemanticException(e.getMessage()); - } - genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), - columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector); - } - } - } + createColumnStatsTasks(pCtx, rootTasks, loadFileWork, isCStats, outerQueryLimit); } decideExecMode(rootTasks, ctx, globalLimitCtx); @@ -356,6 +308,80 @@ public void compile(final ParseContext pCtx, final List> rootTasks, + List loadFileWork, boolean isCStats, int outerQueryLimit) + throws SemanticException { + Set> leafTasks = new LinkedHashSet>(); + getLeafTasks(rootTasks, leafTasks); + if (isCStats) { + genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0); + } else { + for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx + .getColumnStatsAutoGatherContexts()) { + if (!columnStatsAutoGatherContext.isInsertInto()) { + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0); + } else { + int numBitVector; + try { + numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); + } catch (Exception e) { + throw new SemanticException(e.getMessage()); + } + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector); + } + } + } + } + + private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticException { + try { + String protoName = null; + if (pCtx.getQueryProperties().isCTAS()) { + protoName = pCtx.getCreateTable().getTableName(); + } else if (pCtx.getQueryProperties().isMaterializedView()) { + protoName = pCtx.getCreateViewDesc().getViewName(); + } + String[] names = Utilities.getDbTableName(protoName); + if (!db.databaseExists(names[0])) { + throw new SemanticException("ERROR: The database " + names[0] + " does not exist."); + } + Warehouse wh = new Warehouse(conf); + return wh.getTablePath(db.getDatabase(names[0]), names[1]); + } catch (HiveException e) { + throw new SemanticException(e); + } catch (MetaException e) { + throw new SemanticException(e); + } + } + private void patchUpAfterCTASorMaterializedView(final List> rootTasks, final HashSet outputs, Task createTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index eafba2147640..760906810e18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -94,6 +94,10 @@ public class CreateTableDesc extends DDLDesc implements Serializable { private boolean isCTAS = false; List primaryKeys; List foreignKeys; + private Long initialWriteId; + // The FSOP configuration for the FSOP that is going to write initial data during ctas. + // This is not needed beyond compilation, so it is transient. 
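// A sketch of how the compiler is expected to consume this hand-off (method names
// are from this patch, but the exact call site is an assumption, not shown here):
//
//   FileSinkDesc writer = createTableDesc.getAndUnsetWriter();
//   if (writer != null) {
//     // Point the FSOP at the table's final location so the move becomes a no-op.
//     writer.setDirName(getDefaultCtasLocation(pCtx));
//   }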
+ private transient FileSinkDesc writer; public CreateTableDesc() { } @@ -825,5 +829,23 @@ public Table toTable(HiveConf conf) throws HiveException { return tbl; } + public void setInitialWriteId(Long mmWriteId) { + this.initialWriteId = mmWriteId; + } + + public Long getInitialWriteId() { + return initialWriteId; + } + + public FileSinkDesc getAndUnsetWriter() { + FileSinkDesc fsd = writer; + writer = null; + return fsd; + } + + public void setWriter(FileSinkDesc writer) { + this.writer = writer; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 8bef7a9305b0..d7d6e3831c22 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -82,7 +82,6 @@ public enum DPSortState { // the sub-queries write to sub-directories of a common directory. So, the file sink // descriptors for subq1 and subq2 are linked. private boolean linkedFileSink = false; - private Path parentDir; transient private List linkedFileSinkDesc; private boolean statsReliable; @@ -152,7 +151,6 @@ public Object clone() throws CloneNotSupportedException { ret.setStaticSpec(staticSpec); ret.setStatsAggPrefix(statsKeyPref); ret.setLinkedFileSink(linkedFileSink); - ret.setParentDir(parentDir); ret.setLinkedFileSinkDesc(linkedFileSinkDesc); ret.setStatsReliable(statsReliable); ret.setDpSortState(dpSortState); @@ -180,7 +178,7 @@ public void setDirName(final Path dirName) { } public Path getFinalDirName() { - return linkedFileSink ? parentDir : dirName; + return linkedFileSink ? dirName.getParent() : dirName; } /** getFinalDirName that takes into account MM, but not DP, LB or buckets. */ @@ -395,11 +393,7 @@ public void setLinkedFileSink(boolean linkedFileSink) { } public Path getParentDir() { - return parentDir; - } - - public void setParentDir(Path parentDir) { - this.parentDir = parentDir; + return dirName.getParent(); } public boolean isStatsReliable() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java index bcd3125ab4ad..d46f71eddc70 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java @@ -42,5 +42,8 @@ public LoadDesc(final Path sourcePath) { public Path getSourcePath() { return sourcePath; } - + + public void setSourcePath(Path path) { + this.sourcePath = path; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 064a8649c9c9..7670ef247d62 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -60,7 +60,7 @@ public LoadFileDesc(final Path sourcePath, final Path targetDir, final boolean isDfsDir, final String columns, final String columnTypes) { super(sourcePath); - Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir/*, new Exception()*/); + Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir, new Exception()); this.targetDir = targetDir; this.isDfsDir = isDfsDir; this.columns = columns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index f0b2775579b3..3ada134e4836 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -70,7 +70,7 @@ public MoveWork(HashSet inputs, HashSet outputs, boolean checkFileFormat, boolean srcLocal) { this(inputs, outputs); Utilities.LOG14535.info("Creating MoveWork " + System.identityHashCode(this) - + " with " + loadTableWork + "; " + loadFileWork); + + " with " + loadTableWork + "; " + loadFileWork, new Exception()); this.loadTableWork = loadTableWork; this.loadFileWork = loadFileWork; this.checkFileFormat = checkFileFormat; diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index 8ce42a2cf698..8163d2f44b13 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -162,30 +162,22 @@ select * from merge1_mm; drop table merge1_mm; +-- TODO: need to include merge+union+DP, but it's broken for now --- TODO: need to include merge+union, but it's broken for now +drop table ctas0_mm; +create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate; +select * from ctas0_mm; +drop table ctas0_mm; +drop table ctas1_mm; +create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate; +select * from ctas1_mm; +drop table ctas1_mm; - --- future - - - - - - - ---drop table ctas_mm; --- --- ---create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; --- --- ----- TODO load, multi-insert etc --- --- +-- TODO load, multi-insert, buckets drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index f423b00af704..f2d353f77b7b 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -12,15 +12,13 @@ insert into table intermediate partition(p='455') select distinct key from src w insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -set hive.merge.orcfile.stripe.level=true; -set hive.merge.tezfiles=true; -set hive.merge.mapfiles=true; -set hive.merge.mapredfiles=true; - - +create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate; +select * from ctas1_mm; +drop table ctas1_mm; drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index f8001c2bbc04..93716de32227 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -154,11 +154,11 @@ POSTHOOK: Input: default@part_mm@key_mm=456 10 455 10 455 97 455 -97 456 97 455 -98 455 +97 456 98 456 98 455 +98 455 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -903,59 +903,105 @@ POSTHOOK: query: drop table merge1_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@merge1_mm POSTHOOK: Output: default@merge1_mm -PREHOOK: query: -- TODO: need to include merge+union, but it's broken for now - - - - - - --- future - - - +PREHOOK: query: -- TODO: need to include merge+union+DP, but it's broken for now +drop table ctas0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- TODO: need to include merge+union+DP, but it's broken for now ---drop table ctas_mm; --- --- ---create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; --- --- ----- TODO 
load, multi-insert etc --- --- +drop table ctas0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas0_mm +POSTHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ctas0_mm +POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from ctas0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@ctas0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from ctas0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ctas0_mm +#### A masked pattern was here #### +98 455 +97 455 +0 456 +10 456 +PREHOOK: query: drop table ctas0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ctas0_mm +PREHOOK: Output: default@ctas0_mm +POSTHOOK: query: drop table ctas0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ctas0_mm +POSTHOOK: Output: default@ctas0_mm +PREHOOK: query: drop table ctas1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ctas1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ctas1_mm +POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from ctas1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from ctas1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +98 455 +97 455 +0 456 +10 456 +98 455 +97 455 +0 456 +10 456 +PREHOOK: query: drop table ctas1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ctas1_mm +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: drop table ctas1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ctas1_mm +POSTHOOK: Output: default@ctas1_mm +PREHOOK: query: -- TODO load, multi-insert, buckets drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO: need to include merge+union, but it's broken for now - - - - 
- - --- future - - - - - - - ---drop table ctas_mm; --- --- ---create table ctas_mm tblproperties ('hivecommit'='true') as select * from src limit 3; --- --- ----- TODO load, multi-insert etc --- --- +POSTHOOK: query: -- TODO load, multi-insert, buckets drop table intermediate POSTHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 4d28c63ed78e..5b51fa360311 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,6 +28,48 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ctas1_mm +POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from ctas1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from ctas1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +98 455 +97 455 +0 456 +10 456 +98 455 +97 455 +0 456 +10 456 +PREHOOK: query: drop table ctas1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ctas1_mm +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: drop table ctas1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ctas1_mm +POSTHOOK: Output: default@ctas1_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From edaebb4b29e99a0fd4abf50db910449f1c41d06a Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 17 Oct 2016 13:41:20 -0700 Subject: [PATCH 11/24] HIVE-14932 : handle bucketing for MM tables (Sergey Shelukhin) --- .../apache/hadoop/hive/ql/exec/Utilities.java | 201 ++++--- .../hadoop/hive/ql/metadata/Partition.java | 1 + .../hive/ql/optimizer/GenMapRedUtils.java | 2 +- .../hive/ql/optimizer/SamplePruner.java | 7 +- .../apache/hadoop/hive/ql/plan/MoveWork.java | 2 +- ql/src/test/queries/clientpositive/mm_all.q | 7 +- ql/src/test/queries/clientpositive/mm_all2.q | 70 +++ .../test/queries/clientpositive/mm_current.q | 18 +- .../results/clientpositive/llap/mm_all.q.out | 204 ++++++- .../results/clientpositive/llap/mm_all2.q.out | 503 ++++++++++++++++++ .../clientpositive/llap/mm_current.q.out | 217 ++++++-- 11 files changed, 1094 insertions(+), 138 deletions(-) create mode 100644 ql/src/test/queries/clientpositive/mm_all2.q create mode 100644 
ql/src/test/results/clientpositive/llap/mm_all2.q.out diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index accb237acb81..f1dad715e952 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -1525,23 +1525,22 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), numBuckets = (conf != null && conf.getTable() != null) ? conf.getTable().getNumBuckets() : 0; - return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf); + return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf, null); } public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, - int dpLevels, int numBuckets, Configuration hconf) throws IOException { + int dpLevels, int numBuckets, Configuration hconf, Long mmWriteId) throws IOException { if (fileStats == null) { return null; } - List result = new ArrayList(); HashMap taskIDToFile = null; if (dpLevels > 0) { FileStatus parts[] = fileStats; - for (int i = 0; i < parts.length; ++i) { assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() + " is not a directory"; + Utilities.LOG14535.info("removeTempOrDuplicateFiles looking at DP " + parts[i].getPath()); FileStatus[] items = fs.listStatus(parts[i].getPath()); // remove empty directory since DP insert should not generate empty partitions. @@ -1551,46 +1550,80 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] LOG.error("Cannot delete empty directory " + parts[i].getPath()); throw new IOException("Cannot delete empty directory " + parts[i].getPath()); } + parts[i] = null; + continue; } - taskIDToFile = removeTempOrDuplicateFiles(items, fs); - // if the table is bucketed and enforce bucketing, we should check and generate all buckets - if (numBuckets > 0 && taskIDToFile != null && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { - // refresh the file list - items = fs.listStatus(parts[i].getPath()); - // get the missing buckets and generate empty buckets - String taskID1 = taskIDToFile.keySet().iterator().next(); - Path bucketPath = taskIDToFile.values().iterator().next().getPath(); - Utilities.LOG14535.info("Bucket path " + bucketPath); - for (int j = 0; j < numBuckets; ++j) { - addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); + if (mmWriteId != null) { + Path mmDir = parts[i].getPath(); + if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { + throw new IOException("Unexpected non-MM directory name " + mmDir); } + Utilities.LOG14535.info("removeTempOrDuplicateFiles processing files in MM directory " + mmDir); } + taskIDToFile = removeTempOrDuplicateFilesNonMm(items, fs); + + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. 
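// For MM tables, each dynamic-partition directory is expected to contain exactly
// one write-id directory at this point; illustrative layout (directory name format
// assumed from ValidWriteIds.getMmFilePrefix, paths hypothetical):
//
//   warehouse/t/p=455/mm_0/000000_0
//   warehouse/t/p=456/mm_0/000000_0
//
// Anything else fails fast with the IOException above instead of being cleaned up.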
+ addBucketFileToResults(taskIDToFile, numBuckets, hconf, result); } } else { FileStatus[] items = fileStats; if (items.length == 0) { return result; } - taskIDToFile = removeTempOrDuplicateFiles(items, fs); - if(taskIDToFile != null && taskIDToFile.size() > 0 && (numBuckets > taskIDToFile.size()) - && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { - // get the missing buckets and generate empty buckets for non-dynamic partition - String taskID1 = taskIDToFile.keySet().iterator().next(); - Path bucketPath = taskIDToFile.values().iterator().next().getPath(); - Utilities.LOG14535.info("Bucket path " + bucketPath); - for (int j = 0; j < numBuckets; ++j) { - addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); + if (mmWriteId == null) { + taskIDToFile = removeTempOrDuplicateFilesNonMm(items, fs); + } else { + if (items.length > 1) { + throw new IOException("Unexpected directories for non-DP MM: " + Arrays.toString(items)); + } + Path mmDir = items[0].getPath(); + if (!items[0].isDirectory() + || !mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { + throw new IOException("Unexpected non-MM directory " + mmDir); } + Utilities.LOG14535.info( + "removeTempOrDuplicateFiles processing files in MM directory " + mmDir); + taskIDToFile = removeTempOrDuplicateFilesNonMm(fs.listStatus(mmDir), fs); } + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. + addBucketFileToResults2(taskIDToFile, numBuckets, hconf, result); } return result; } + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. + private static void addBucketFileToResults2(HashMap taskIDToFile, + int numBuckets, Configuration hconf, List result) { + if(taskIDToFile != null && taskIDToFile.size() > 0 && (numBuckets > taskIDToFile.size()) + && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + addBucketsToResultsCommon(taskIDToFile, numBuckets, result); + } + } + + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. + private static void addBucketFileToResults(HashMap taskIDToFile, + int numBuckets, Configuration hconf, List result) { + // if the table is bucketed and enforce bucketing, we should check and generate all buckets + if (numBuckets > 0 && taskIDToFile != null + && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + addBucketsToResultsCommon(taskIDToFile, numBuckets, result); + } + } + + private static void addBucketsToResultsCommon( + HashMap taskIDToFile, int numBuckets, List result) { + String taskID1 = taskIDToFile.keySet().iterator().next(); + Path bucketPath = taskIDToFile.values().iterator().next().getPath(); + Utilities.LOG14535.info("Bucket path " + bucketPath); + for (int j = 0; j < numBuckets; ++j) { + addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); + } + } + private static void addBucketFileIfMissing(List result, HashMap taskIDToFile, String taskID1, Path bucketPath, int j) { - // TODO# this will probably break with directories cause buckets would be above (or not?) 
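// Worked example of the backfill below (file names assumed to follow the usual
// taskId pattern): with numBuckets == 4 and only 000000_0 and 000002_0 written,
// replaceTaskId derives "000001" and "000003", and those missing bucket files are
// added to `result` so the caller can create them as empty buckets.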
String taskID2 = replaceTaskId(taskID1, j); if (!taskIDToFile.containsKey(taskID2)) { // create empty bucket, file name should be derived from taskID2 @@ -1601,77 +1634,81 @@ private static void addBucketFileIfMissing(List result, } } - public static HashMap removeTempOrDuplicateFiles(FileStatus[] items, - FileSystem fs) throws IOException { - - if (items == null || fs == null) { + private static HashMap removeTempOrDuplicateFilesNonMm( + FileStatus[] files, FileSystem fs) throws IOException { + if (files == null || fs == null) { return null; } - HashMap taskIdToFile = new HashMap(); - for (FileStatus one : items) { + for (FileStatus one : files) { if (isTempPath(one)) { Utilities.LOG14535.info("removeTempOrDuplicateFiles deleting " + one.getPath()/*, new Exception()*/); if (!fs.delete(one.getPath(), true)) { throw new IOException("Unable to delete tmp file: " + one.getPath()); } } else { - String taskId = getPrefixedTaskIdFromFilename(one.getPath().getName()); - Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + one.getPath() + ", taskId " + taskId); - - FileStatus otherFile = taskIdToFile.get(taskId); - if (otherFile == null) { - taskIdToFile.put(taskId, one); - } else { - // Compare the file sizes of all the attempt files for the same task, the largest win - // any attempt files could contain partial results (due to task failures or - // speculative runs), but the largest should be the correct one since the result - // of a successful run should never be smaller than a failed/speculative run. - FileStatus toDelete = null; - - // "LOAD .. INTO" and "INSERT INTO" commands will generate files with - // "_copy_x" suffix. These files are usually read by map tasks and the - // task output gets written to some tmp path. The output file names will - // be of format taskId_attemptId. The usual path for all these tasks is - // srcPath -> taskTmpPath -> tmpPath -> finalPath. - // But, MergeFileTask can move files directly from src path to final path - // without copying it to tmp path. In such cases, different files with - // "_copy_x" suffix will be identified as duplicates (change in value - // of x is wrongly identified as attempt id) and will be deleted. - // To avoid that we will ignore files with "_copy_x" suffix from duplicate - // elimination. - if (!isCopyFile(one.getPath().getName())) { - if (otherFile.getLen() >= one.getLen()) { - toDelete = one; - } else { - toDelete = otherFile; - taskIdToFile.put(taskId, one); - } - long len1 = toDelete.getLen(); - long len2 = taskIdToFile.get(taskId).getLen(); - if (!fs.delete(toDelete.getPath(), true)) { - throw new IOException( - "Unable to delete duplicate file: " + toDelete.getPath() - + ". Existing file: " + - taskIdToFile.get(taskId).getPath()); - } else { - LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + - " with length " - + len1 + ". Existing file: " + - taskIdToFile.get(taskId).getPath() + " with length " - + len2); - } - } else { - LOG.info(one.getPath() + " file identified as duplicate. This file is" + - " not deleted as it has copySuffix."); - } - } + // This would be a single file. See if we need to remove it. 
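// Dedup rule applied here: attempt files of one task share a task id (e.g.
// 000001_0 and 000001_1 both map to "000001" -- names illustrative), and the
// larger attempt is retained; "_copy_N" files are exempt, since their suffix
// would otherwise be misread as an attempt id.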
+ ponderRemovingTempOrDuplicateFile(fs, one, taskIdToFile); } } return taskIdToFile; } + private static void ponderRemovingTempOrDuplicateFile(FileSystem fs, + FileStatus file, HashMap taskIdToFile) throws IOException { + String taskId = getPrefixedTaskIdFromFilename(file.getPath().getName()); + Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + file.getPath() + ", taskId " + taskId); + + FileStatus otherFile = taskIdToFile.get(taskId); + taskIdToFile.put(taskId, (otherFile == null) ? file : + compareTempOrDuplicateFiles(fs, file, otherFile)); + } + + private static FileStatus compareTempOrDuplicateFiles(FileSystem fs, + FileStatus file, FileStatus existingFile) throws IOException { + // Compare the file sizes of all the attempt files for the same task, the largest win + // any attempt files could contain partial results (due to task failures or + // speculative runs), but the largest should be the correct one since the result + // of a successful run should never be smaller than a failed/speculative run. + FileStatus toDelete = null, toRetain = null; + + // "LOAD .. INTO" and "INSERT INTO" commands will generate files with + // "_copy_x" suffix. These files are usually read by map tasks and the + // task output gets written to some tmp path. The output file names will + // be of format taskId_attemptId. The usual path for all these tasks is + // srcPath -> taskTmpPath -> tmpPath -> finalPath. + // But, MergeFileTask can move files directly from src path to final path + // without copying it to tmp path. In such cases, different files with + // "_copy_x" suffix will be identified as duplicates (change in value + // of x is wrongly identified as attempt id) and will be deleted. + // To avoid that we will ignore files with "_copy_x" suffix from duplicate + // elimination. + if (isCopyFile(file.getPath().getName())) { + LOG.info(file.getPath() + " file identified as duplicate. This file is" + + " not deleted as it has copySuffix."); + return existingFile; + } + + if (existingFile.getLen() >= file.getLen()) { + toDelete = file; + toRetain = existingFile; + } else { + toDelete = existingFile; + toRetain = file; + } + if (!fs.delete(toDelete.getPath(), true)) { + throw new IOException( + "Unable to delete duplicate file: " + toDelete.getPath() + + ". Existing file: " + toRetain.getPath()); + } else { + LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + " with length " + + toDelete.getLen() + ". Existing file: " + toRetain.getPath() + " with length " + + toRetain.getLen()); + } + return toRetain; + } + public static boolean isCopyFile(String filename) { String taskId = filename; String copyFileSuffix = null; @@ -3928,7 +3965,7 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con if (lbLevels != 0) return; FileStatus[] finalResults = mmDirectories.toArray(new FileStatus[mmDirectories.size()]); List emptyBuckets = Utilities.removeTempOrDuplicateFiles( - fs, finalResults, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf); + fs, finalResults, dpLevels, mbc == null ? 
0 : mbc.numBuckets, hconf, mmWriteId); // create empty buckets if necessary if (emptyBuckets.size() > 0) { assert mbc != null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index c0edde9e9231..95a09e2989d7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -366,6 +366,7 @@ public FileStatus[] getSortedPaths() { // TODO: add test case and clean it up @SuppressWarnings("nls") public Path getBucketPath(int bucketNum) { + // Note: this makes assumptions that won't work with MM tables, unions, etc. FileStatus srcs[] = getSortedPaths(); if (srcs == null) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index bd26854fa1e7..0b5d56b2b41d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -674,7 +674,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set inputs, HashSet outputs, boolean checkFileFormat, boolean srcLocal) { this(inputs, outputs); Utilities.LOG14535.info("Creating MoveWork " + System.identityHashCode(this) - + " with " + loadTableWork + "; " + loadFileWork, new Exception()); + + " with " + loadTableWork + "; " + loadFileWork); this.loadTableWork = loadTableWork; this.loadFileWork = loadFileWork; this.checkFileFormat = checkFileFormat; diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index 8163d2f44b13..bdda5f5de71f 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -11,6 +11,8 @@ drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; + drop table part_mm; create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); @@ -18,7 +20,7 @@ explain insert into table part_mm partition(key_mm='455') select key from interm insert into table part_mm partition(key_mm='455') select key from intermediate; insert into table part_mm partition(key_mm='456') select key from intermediate; insert into table part_mm partition(key_mm='455') select key from intermediate; -select * from part_mm order by key; +select * from part_mm order by key, key_mm; drop table part_mm; drop table simple_mm; @@ -177,7 +179,4 @@ select * from ctas1_mm; drop table ctas1_mm; - --- TODO load, multi-insert, buckets - drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_all2.q b/ql/src/test/queries/clientpositive/mm_all2.q new file mode 100644 index 000000000000..a1d2301908a4 --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_all2.q @@ -0,0 +1,70 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set tez.grouping.min-size=1; +set tez.grouping.max-size=2; +set hive.exec.dynamic.partition.mode=nonstrict; + + +-- Force multiple 
writers when reading +drop table intermediate; +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; + + + +drop table bucket0_mm; +create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true'); +insert into table bucket0_mm select key, key from intermediate; +select * from bucket0_mm; +select * from bucket0_mm tablesample (bucket 1 out of 2) s; +select * from bucket0_mm tablesample (bucket 2 out of 2) s; +insert into table bucket0_mm select key, key from intermediate; +select * from bucket0_mm; +select * from bucket0_mm tablesample (bucket 1 out of 2) s; +select * from bucket0_mm tablesample (bucket 2 out of 2) s; +drop table bucket0_mm; + + +drop table bucket1_mm; +create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true'); +insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate; +select * from bucket1_mm; +select * from bucket1_mm tablesample (bucket 1 out of 2) s; +select * from bucket1_mm tablesample (bucket 2 out of 2) s; +drop table bucket1_mm; + + + +drop table bucket2_mm; +create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true'); +insert into table bucket2_mm select key, key from intermediate where key == 0; +select * from bucket2_mm; +select * from bucket2_mm tablesample (bucket 1 out of 10) s; +select * from bucket2_mm tablesample (bucket 4 out of 10) s; +insert into table bucket2_mm select key, key from intermediate where key in (0, 103); +select * from bucket2_mm; +select * from bucket2_mm tablesample (bucket 1 out of 10) s; +select * from bucket2_mm tablesample (bucket 4 out of 10) s; +drop table bucket2_mm; + + + +-- TODO# future + + + +-- TODO load, multi-insert, buckets + +drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index f2d353f77b7b..44445dd835c0 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -10,16 +10,24 @@ drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; +drop table bucket1_mm; +create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true'); +insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate; +select * from bucket1_mm; +select * from bucket1_mm tablesample (bucket 1 out of 2) s; +select * from 
bucket1_mm tablesample (bucket 2 out of 2) s; +drop table bucket1_mm; -create table ctas1_mm tblproperties ('hivecommit'='true') as - select * from intermediate union all select * from intermediate; -select * from ctas1_mm; -drop table ctas1_mm; - drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 93716de32227..4061e5bb2bdc 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -30,6 +30,15 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE POSTHOOK: query: drop table part_mm @@ -61,14 +70,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: intermediate - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 48 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -101,12 +110,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@part_mm@key_mm=455 POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@part_mm@key_mm=455 POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate @@ -114,12 +125,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@part_mm@key_mm=456 POSTHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: 
default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@part_mm@key_mm=456 POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate @@ -127,21 +140,23 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@part_mm@key_mm=455 POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@part_mm@key_mm=455 POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from part_mm order by key +PREHOOK: query: select * from part_mm order by key, key_mm PREHOOK: type: QUERY PREHOOK: Input: default@part_mm PREHOOK: Input: default@part_mm@key_mm=455 PREHOOK: Input: default@part_mm@key_mm=456 #### A masked pattern was here #### -POSTHOOK: query: select * from part_mm order by key +POSTHOOK: query: select * from part_mm order by key, key_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@part_mm POSTHOOK: Input: default@part_mm@key_mm=455 @@ -150,15 +165,21 @@ POSTHOOK: Input: default@part_mm@key_mm=456 0 455 0 455 0 456 -10 456 10 455 10 455 +10 456 97 455 97 455 97 456 -98 456 98 455 98 455 +98 456 +100 455 +100 455 +100 456 +103 455 +103 455 +103 456 PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -184,12 +205,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@simple_mm POSTHOOK: query: insert into table simple_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: insert overwrite table simple_mm select key from intermediate @@ -197,12 +220,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@simple_mm POSTHOOK: query: insert overwrite table simple_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from simple_mm order by key @@ -217,17 +242,21 @@ POSTHOOK: Input: default@simple_mm 10 97 98 +100 +103 PREHOOK: query: insert into table simple_mm select key from intermediate PREHOOK: 
type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@simple_mm POSTHOOK: query: insert into table simple_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@simple_mm POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from simple_mm order by key @@ -246,6 +275,10 @@ POSTHOOK: Input: default@simple_mm 97 98 98 +100 +100 +103 +103 PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@simple_mm @@ -275,17 +308,23 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@dp_mm@key1=123 POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@dp_mm@key1=123/key2=0 POSTHOOK: Output: default@dp_mm@key1=123/key2=10 +POSTHOOK: Output: default@dp_mm@key1=123/key2=100 +POSTHOOK: Output: default@dp_mm@key1=123/key2=103 POSTHOOK: Output: default@dp_mm@key1=123/key2=97 POSTHOOK: Output: default@dp_mm@key1=123/key2=98 POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=100).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=103).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -294,6 +333,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@dp_mm PREHOOK: Input: default@dp_mm@key1=123/key2=0 PREHOOK: Input: default@dp_mm@key1=123/key2=10 +PREHOOK: Input: default@dp_mm@key1=123/key2=100 +PREHOOK: Input: default@dp_mm@key1=123/key2=103 PREHOOK: Input: default@dp_mm@key1=123/key2=97 PREHOOK: Input: default@dp_mm@key1=123/key2=98 #### A masked pattern was here #### @@ -302,6 +343,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@dp_mm POSTHOOK: Input: default@dp_mm@key1=123/key2=0 POSTHOOK: Input: default@dp_mm@key1=123/key2=10 +POSTHOOK: Input: default@dp_mm@key1=123/key2=100 +POSTHOOK: Input: default@dp_mm@key1=123/key2=103 POSTHOOK: Input: default@dp_mm@key1=123/key2=97 POSTHOOK: Input: default@dp_mm@key1=123/key2=98 #### A masked pattern was here #### @@ -309,6 +352,8 @@ POSTHOOK: Input: default@dp_mm@key1=123/key2=98 10 123 10 97 123 97 98 123 98 +100 123 100 +103 123 103 PREHOOK: query: drop table dp_mm PREHOOK: type: DROPTABLE PREHOOK: Input: 
default@dp_mm @@ -338,6 +383,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm select temps.p from ( @@ -348,6 +394,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from union_mm order by id @@ -366,6 +413,10 @@ POSTHOOK: Input: default@union_mm 98 98 99 +100 +101 +103 +104 PREHOOK: query: insert into table union_mm select p from ( @@ -379,6 +430,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm select p from @@ -393,6 +445,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from union_mm order by id @@ -422,6 +475,16 @@ POSTHOOK: Input: default@union_mm 99 99 100 +100 +100 +101 +101 +102 +103 +103 +104 +104 +105 PREHOOK: query: insert into table union_mm SELECT p FROM ( @@ -442,6 +505,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@union_mm POSTHOOK: query: insert into table union_mm SELECT p FROM @@ -463,6 +527,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@union_mm POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from union_mm order by id @@ -502,6 +567,21 @@ POSTHOOK: Input: default@union_mm 99 100 100 +100 +100 +101 +101 +101 +102 +102 +103 +103 +103 +104 +104 +104 +105 +105 PREHOOK: query: drop table union_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@union_mm @@ -527,6 +607,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@partunion_mm POSTHOOK: query: insert into table partunion_mm partition(key) select temps.* from ( @@ -537,14 +618,23 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@partunion_mm@key=0 POSTHOOK: Output: default@partunion_mm@key=1 POSTHOOK: Output: default@partunion_mm@key=10 +POSTHOOK: Output: default@partunion_mm@key=100 +POSTHOOK: Output: default@partunion_mm@key=101 +POSTHOOK: Output: default@partunion_mm@key=103 
+POSTHOOK: Output: default@partunion_mm@key=104 POSTHOOK: Output: default@partunion_mm@key=11 POSTHOOK: Output: default@partunion_mm@key=97 POSTHOOK: Output: default@partunion_mm@key=98 POSTHOOK: Output: default@partunion_mm@key=99 POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=100).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=103).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -557,6 +647,10 @@ PREHOOK: Input: default@partunion_mm PREHOOK: Input: default@partunion_mm@key=0 PREHOOK: Input: default@partunion_mm@key=1 PREHOOK: Input: default@partunion_mm@key=10 +PREHOOK: Input: default@partunion_mm@key=100 +PREHOOK: Input: default@partunion_mm@key=101 +PREHOOK: Input: default@partunion_mm@key=103 +PREHOOK: Input: default@partunion_mm@key=104 PREHOOK: Input: default@partunion_mm@key=11 PREHOOK: Input: default@partunion_mm@key=97 PREHOOK: Input: default@partunion_mm@key=98 @@ -568,6 +662,10 @@ POSTHOOK: Input: default@partunion_mm POSTHOOK: Input: default@partunion_mm@key=0 POSTHOOK: Input: default@partunion_mm@key=1 POSTHOOK: Input: default@partunion_mm@key=10 +POSTHOOK: Input: default@partunion_mm@key=100 +POSTHOOK: Input: default@partunion_mm@key=101 +POSTHOOK: Input: default@partunion_mm@key=103 +POSTHOOK: Input: default@partunion_mm@key=104 POSTHOOK: Input: default@partunion_mm@key=11 POSTHOOK: Input: default@partunion_mm@key=97 POSTHOOK: Input: default@partunion_mm@key=98 @@ -581,6 +679,10 @@ POSTHOOK: Input: default@partunion_mm@key=99 98 98 98 98 99 99 +100 100 +101 101 +103 103 +104 104 PREHOOK: query: drop table partunion_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@partunion_mm @@ -605,6 +707,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@skew_mm POSTHOOK: query: insert into table skew_mm select key, key, key from intermediate @@ -612,6 +715,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@skew_mm POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -628,6 +732,8 @@ POSTHOOK: Input: default@skew_mm 10 10 10 97 97 97 98 98 98 +100 100 100 +103 103 103 PREHOOK: query: drop table skew_mm PREHOOK: 
type: DROPTABLE PREHOOK: Input: default@skew_mm @@ -654,6 +760,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@skew_dp_union_mm POSTHOOK: query: insert into table skew_dp_union_mm partition (k3) select key as i, key as j, key as k, key as l from intermediate @@ -663,10 +770,15 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@skew_dp_union_mm@k3=0 POSTHOOK: Output: default@skew_dp_union_mm@k3=10 +POSTHOOK: Output: default@skew_dp_union_mm@k3=100 POSTHOOK: Output: default@skew_dp_union_mm@k3=101 POSTHOOK: Output: default@skew_dp_union_mm@k3=102 +POSTHOOK: Output: default@skew_dp_union_mm@k3=103 +POSTHOOK: Output: default@skew_dp_union_mm@k3=104 +POSTHOOK: Output: default@skew_dp_union_mm@k3=107 POSTHOOK: Output: default@skew_dp_union_mm@k3=14 POSTHOOK: Output: default@skew_dp_union_mm@k3=4 POSTHOOK: Output: default@skew_dp_union_mm@k3=97 @@ -674,12 +786,24 @@ POSTHOOK: Output: default@skew_dp_union_mm@k3=98 POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k1 EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -700,8 +824,12 @@ PREHOOK: type: QUERY PREHOOK: Input: default@skew_dp_union_mm PREHOOK: Input: default@skew_dp_union_mm@k3=0 PREHOOK: Input: default@skew_dp_union_mm@k3=10 +PREHOOK: Input: default@skew_dp_union_mm@k3=100 PREHOOK: Input: default@skew_dp_union_mm@k3=101 PREHOOK: Input: default@skew_dp_union_mm@k3=102 +PREHOOK: Input: default@skew_dp_union_mm@k3=103 +PREHOOK: Input: default@skew_dp_union_mm@k3=104 +PREHOOK: Input: default@skew_dp_union_mm@k3=107 PREHOOK: Input: default@skew_dp_union_mm@k3=14 PREHOOK: Input: default@skew_dp_union_mm@k3=4 PREHOOK: Input: default@skew_dp_union_mm@k3=97 @@ -712,8 +840,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@skew_dp_union_mm POSTHOOK: Input: default@skew_dp_union_mm@k3=0 POSTHOOK: Input: default@skew_dp_union_mm@k3=10 +POSTHOOK: Input: default@skew_dp_union_mm@k3=100 POSTHOOK: Input: default@skew_dp_union_mm@k3=101 POSTHOOK: Input: default@skew_dp_union_mm@k3=102 +POSTHOOK: Input: default@skew_dp_union_mm@k3=103 +POSTHOOK: Input: default@skew_dp_union_mm@k3=104 +POSTHOOK: Input: default@skew_dp_union_mm@k3=107 POSTHOOK: Input: default@skew_dp_union_mm@k3=14 POSTHOOK: Input: default@skew_dp_union_mm@k3=4 POSTHOOK: Input: default@skew_dp_union_mm@k3=97 @@ -727,6 +859,10 @@ POSTHOOK: Input: default@skew_dp_union_mm@k3=98 98 98 98 98 98 99 100 101 99 100 101 102 +100 100 100 100 +101 102 103 104 +103 103 103 103 +104 105 106 107 PREHOOK: query: drop table skew_dp_union_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@skew_dp_union_mm @@ -748,12 +884,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@merge0_mm POSTHOOK: query: insert into table merge0_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@merge0_mm POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from merge0_mm @@ -766,6 +904,8 @@ POSTHOOK: Input: default@merge0_mm #### A masked pattern was here #### 98 
97 +100 +103 0 10 PREHOOK: query: insert into table merge0_mm select key from intermediate @@ -773,12 +913,14 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@merge0_mm POSTHOOK: query: insert into table merge0_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@merge0_mm POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] PREHOOK: query: select * from merge0_mm @@ -791,10 +933,14 @@ POSTHOOK: Input: default@merge0_mm #### A masked pattern was here #### 98 97 +100 +103 0 10 98 97 +100 +103 0 10 PREHOOK: query: drop table merge0_mm @@ -818,17 +964,23 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: default@merge1_mm POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@merge1_mm@key=0 POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=100 +POSTHOOK: Output: default@merge1_mm@key=103 POSTHOOK: Output: default@merge1_mm@key=97 POSTHOOK: Output: default@merge1_mm@key=98 POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -837,6 +989,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@merge1_mm PREHOOK: Input: default@merge1_mm@key=0 PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=100 +PREHOOK: Input: default@merge1_mm@key=103 PREHOOK: Input: default@merge1_mm@key=97 PREHOOK: Input: default@merge1_mm@key=98 #### A masked pattern was here #### @@ -845,9 +999,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@merge1_mm POSTHOOK: Input: default@merge1_mm@key=0 POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=100 +POSTHOOK: Input: default@merge1_mm@key=103 POSTHOOK: Input: default@merge1_mm@key=97 POSTHOOK: Input: default@merge1_mm@key=98 #### A masked pattern was here #### +100 100 +103 103 97 97 98 98 0 0 @@ -857,17 +1015,23 @@ PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: 
Output: default@merge1_mm POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@merge1_mm@key=0 POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=100 +POSTHOOK: Output: default@merge1_mm@key=103 POSTHOOK: Output: default@merge1_mm@key=97 POSTHOOK: Output: default@merge1_mm@key=98 POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -876,6 +1040,8 @@ PREHOOK: type: QUERY PREHOOK: Input: default@merge1_mm PREHOOK: Input: default@merge1_mm@key=0 PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=100 +PREHOOK: Input: default@merge1_mm@key=103 PREHOOK: Input: default@merge1_mm@key=97 PREHOOK: Input: default@merge1_mm@key=98 #### A masked pattern was here #### @@ -884,9 +1050,15 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@merge1_mm POSTHOOK: Input: default@merge1_mm@key=0 POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=100 +POSTHOOK: Input: default@merge1_mm@key=103 POSTHOOK: Input: default@merge1_mm@key=97 POSTHOOK: Input: default@merge1_mm@key=98 #### A masked pattern was here #### +100 100 +100 100 +103 103 +103 103 97 97 97 97 98 98 @@ -918,6 +1090,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: database:default PREHOOK: Output: default@ctas0_mm POSTHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate @@ -925,6 +1098,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas0_mm POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -939,6 +1113,8 @@ POSTHOOK: Input: default@ctas0_mm #### A masked pattern was here #### 98 455 97 455 +100 457 +103 457 0 456 10 456 PREHOOK: query: drop table ctas0_mm @@ -959,6 +1135,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 PREHOOK: Output: database:default PREHOOK: Output: default@ctas1_mm POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as @@ -967,6 +1144,7 @@ POSTHOOK: 
type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas1_mm POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] @@ -981,10 +1159,14 @@ POSTHOOK: Input: default@ctas1_mm #### A masked pattern was here #### 98 455 97 455 +100 457 +103 457 0 456 10 456 98 455 97 455 +100 457 +103 457 0 456 10 456 PREHOOK: query: drop table ctas1_mm @@ -995,15 +1177,11 @@ POSTHOOK: query: drop table ctas1_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@ctas1_mm POSTHOOK: Output: default@ctas1_mm -PREHOOK: query: -- TODO load, multi-insert, buckets - -drop table intermediate +PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO load, multi-insert, buckets - -drop table intermediate +POSTHOOK: query: drop table intermediate POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@intermediate POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_all2.q.out b/ql/src/test/results/clientpositive/llap/mm_all2.q.out new file mode 100644 index 000000000000..95ce33ae3d36 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_all2.q.out @@ -0,0 +1,503 @@ +PREHOOK: query: -- Force multiple writers when reading +drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Force multiple writers when reading +drop table intermediate +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert 
into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table bucket0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket0_mm +PREHOOK: query: insert into table bucket0_mm select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket0_mm +POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +100 100 +0 0 +10 10 +98 98 +103 103 +97 97 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +100 100 +0 0 +10 10 +98 98 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +103 103 +97 97 +PREHOOK: query: insert into table bucket0_mm select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket0_mm +POSTHOOK: Lineage: bucket0_mm.id SIMPLE 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +100 100 +0 0 +10 10 +98 98 +103 103 +97 97 +98 98 +0 0 +10 10 +100 100 +97 97 +103 103 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +100 100 +0 0 +10 10 +98 98 +98 98 +0 0 +10 10 +100 100 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +103 103 +97 97 +97 97 +103 103 +PREHOOK: query: drop table bucket0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket0_mm +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: drop table bucket0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket0_mm +POSTHOOK: Output: default@bucket0_mm +PREHOOK: query: drop table bucket1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket1_mm +PREHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket1_mm@key2=-1 +POSTHOOK: Output: default@bucket1_mm@key2=1 +POSTHOOK: Output: default@bucket1_mm@key2=101 +POSTHOOK: Output: default@bucket1_mm@key2=102 +POSTHOOK: Output: default@bucket1_mm@key2=104 +POSTHOOK: Output: default@bucket1_mm@key2=11 +POSTHOOK: Output: default@bucket1_mm@key2=9 +POSTHOOK: Output: default@bucket1_mm@key2=96 +POSTHOOK: Output: default@bucket1_mm@key2=97 +POSTHOOK: Output: default@bucket1_mm@key2=98 +POSTHOOK: Output: 
default@bucket1_mm@key2=99 +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 
+PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +1 0 -1 +-1 0 1 +99 100 101 +104 103 102 +102 103 104 +9 10 11 +11 10 9 +98 97 96 +99 98 97 +96 97 98 +97 98 99 +101 100 99 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +104 103 102 +102 103 104 +98 97 96 +96 97 98 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: 
Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +1 0 -1 +-1 0 1 +99 100 101 +9 10 11 +11 10 9 +99 98 97 +97 98 99 +101 100 99 +PREHOOK: query: drop table bucket1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket1_mm +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: drop table bucket1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Output: default@bucket1_mm +PREHOOK: query: drop table bucket2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket2_mm +PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket2_mm +POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103) +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: 
default@intermediate@p=457 +POSTHOOK: Output: default@bucket2_mm +POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +0 0 +103 103 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +103 103 +PREHOOK: query: drop table bucket2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket2_mm +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: drop table bucket2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket2_mm +POSTHOOK: Output: default@bucket2_mm +PREHOOK: query: -- TODO# future + + + +-- TODO load, multi-insert, buckets + +drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: -- TODO# future + + + +-- TODO load, multi-insert, buckets + +drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 5b51fa360311..1bbef9d70b00 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,48 +28,205 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as - select * from intermediate union all select * from intermediate -PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table bucket1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) 
+clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket1_mm +PREHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: database:default -PREHOOK: Output: default@ctas1_mm -POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as - select * from intermediate union all select * from intermediate -POSTHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: database:default -POSTHOOK: Output: default@ctas1_mm -POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] -PREHOOK: query: select * from ctas1_mm +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket1_mm@key2=-1 +POSTHOOK: Output: default@bucket1_mm@key2=1 +POSTHOOK: Output: default@bucket1_mm@key2=101 +POSTHOOK: Output: default@bucket1_mm@key2=102 +POSTHOOK: Output: default@bucket1_mm@key2=104 +POSTHOOK: Output: default@bucket1_mm@key2=11 +POSTHOOK: Output: default@bucket1_mm@key2=9 +POSTHOOK: Output: default@bucket1_mm@key2=96 +POSTHOOK: Output: default@bucket1_mm@key2=97 +POSTHOOK: Output: default@bucket1_mm@key2=98 +POSTHOOK: Output: default@bucket1_mm@key2=99 +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +1 0 -1 +-1 0 1 +99 100 101 +104 103 102 +102 103 104 +9 10 11 +11 10 9 +98 97 96 +99 98 97 +96 97 98 +97 98 99 +101 100 99 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: 
default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +104 103 102 +102 103 104 +98 97 96 +96 97 98 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s PREHOOK: type: QUERY -PREHOOK: Input: default@ctas1_mm +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 #### A masked pattern was here #### -POSTHOOK: query: select * from ctas1_mm +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s POSTHOOK: type: QUERY -POSTHOOK: Input: default@ctas1_mm +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 #### A masked pattern was here #### -98 455 -97 455 -0 456 -10 456 -98 455 -97 455 -0 456 -10 456 -PREHOOK: query: drop table ctas1_mm +1 0 -1 +-1 0 1 +99 100 101 +9 10 11 +11 10 9 +99 98 97 +97 98 99 +101 100 99 +PREHOOK: query: drop table bucket1_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@ctas1_mm -PREHOOK: Output: default@ctas1_mm -POSTHOOK: query: drop table ctas1_mm +PREHOOK: Input: default@bucket1_mm +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: drop table bucket1_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@ctas1_mm -POSTHOOK: Output: default@ctas1_mm +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Output: default@bucket1_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From 9ecffcb1bc44558d7959fa0289f27cc49f54d875 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Mon, 17 Oct 2016 13:54:16 -0700 Subject: [PATCH 12/24] HIVE-14899 : MM: support (or disable) alter table concatenate (Sergey Shelukhin) --- 
.../queries/clientnegative/mm_concatenate.q | 5 +++++ .../clientnegative/mm_concatenate.q.out | 18 ++++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 ql/src/test/queries/clientnegative/mm_concatenate.q create mode 100644 ql/src/test/results/clientnegative/mm_concatenate.q.out diff --git a/ql/src/test/queries/clientnegative/mm_concatenate.q b/ql/src/test/queries/clientnegative/mm_concatenate.q new file mode 100644 index 000000000000..c5807670e678 --- /dev/null +++ b/ql/src/test/queries/clientnegative/mm_concatenate.q @@ -0,0 +1,5 @@ +create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true'); + +insert into table concat_mm select key from src limit 10; + +alter table concat_mm concatenate; diff --git a/ql/src/test/results/clientnegative/mm_concatenate.q.out b/ql/src/test/results/clientnegative/mm_concatenate.q.out new file mode 100644 index 000000000000..073640908799 --- /dev/null +++ b/ql/src/test/results/clientnegative/mm_concatenate.q.out @@ -0,0 +1,18 @@ +PREHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@concat_mm +POSTHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@concat_mm +PREHOOK: query: insert into table concat_mm select key from src limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@concat_mm +POSTHOOK: query: insert into table concat_mm select key from src limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@concat_mm +POSTHOOK: Lineage: concat_mm.id EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Merge is not supported for MM tables From b6571eaef2659672a9d3cdf36e2fa6f2168f8063 Mon Sep 17 00:00:00 2001 From: Wei Zheng Date: Tue, 18 Oct 2016 15:13:20 -0700 Subject: [PATCH 13/24] HIVE-14878 : integrate MM tables into ACID: add separate ACID type (Wei Zheng) --- .../org/apache/hadoop/hive/conf/HiveConf.java | 10 +- .../TransactionalValidationListener.java | 39 ++++-- .../hadoop/hive/ql/exec/FileSinkOperator.java | 15 ++- .../apache/hadoop/hive/ql/exec/MoveTask.java | 3 +- .../apache/hadoop/hive/ql/io/AcidUtils.java | 44 ++++++- .../hive/ql/parse/SemanticAnalyzer.java | 34 +++--- .../clientpositive/mm_insertonly_acid.q | 16 +++ .../clientpositive/mm_insertonly_acid.q.out | 115 ++++++++++++++++++ 8 files changed, 236 insertions(+), 40 deletions(-) create mode 100644 ql/src/test/queries/clientpositive/mm_insertonly_acid.q create mode 100644 ql/src/test/results/clientpositive/mm_insertonly_acid.q.out diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 23314ed125ef..ccc29f85150c 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1787,10 +1787,12 @@ public static enum ConfVars { HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 0, "Sets the operational properties that control the appropriate behavior for various\n" - + "versions of the Hive ACID subsystem. 
Setting it to zero will turn on the legacy mode\n"
-        + "for ACID, while setting it to one will enable a split-update feature found in the newer\n"
-        + "version of Hive ACID subsystem. Mostly it is intended to be used as an internal property\n"
-        + "for future versions of ACID. (See HIVE-14035 for details.)"),
+        + "versions of the Hive ACID subsystem. Mostly it is intended to be used as an internal property\n"
+        + "for future versions of ACID. (See HIVE-14035 for details.)\n"
+        + "0: Turn on the legacy mode for ACID\n"
+        + "1: Enable the split-update feature found in the newer version of the Hive ACID subsystem\n"
+        + "2: Hash-based merge, which combines delta files using a GRACE hash join based approach (not implemented)\n"
+        + "3: Make the table 'quarter-acid': it only supports insert, and it requires neither ORC nor bucketing."),
     HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" +
         "current open transactions reach this limit, future open transaction requests will be \n" +
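A quick HiveQL illustration of what setting 3 corresponds to at the table level (a hedged sketch: the table name is made up, but the property values are the ones validated by TransactionalValidationListener below):

-- Minimal sketch of an insert-only ("quarter-acid") table: unlike full ACID it
-- requires neither ORC nor bucketing, and only INSERT is supported against it.
create table example_mm (key int)
  tblproperties('transactional'='true', 'transactional_properties'='insert_only');
insert into table example_mm select key from src;
-- An update or delete against example_mm would be rejected for this ACID type.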
if (!transactionalPropFound) { + if (transactional == null) { return; } - if ("false".equalsIgnoreCase(transactionalValue)) { + if ("false".equalsIgnoreCase(transactional)) { // just drop transactional=false. For backward compatibility in case someone has scripts // with transactional=false LOG.info("'transactional'='false' is no longer a valid property and will be ignored"); return; } - if ("true".equalsIgnoreCase(transactionalValue)) { + if ("true".equalsIgnoreCase(transactional)) { if (!conformToAcid(newTable)) { - throw new MetaException("The table must be bucketed and stored using an ACID compliant" + - " format (such as ORC)"); + // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing + if (transactionalProperties == null || !"insert_only".equalsIgnoreCase(transactionalProperties)) { + throw new MetaException("The table must be bucketed and stored using an ACID compliant" + + " format (such as ORC)"); + } } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { @@ -211,7 +223,7 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr return; } - // transactional prop is found, but the value is not in expected range + // transactional is found, but the value is not in expected range throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true'"); } @@ -277,6 +289,7 @@ private String validateTransactionalProperties(String transactionalProperties) { switch (transactionalProperties) { case DEFAULT_TRANSACTIONAL_PROPERTY: case LEGACY_TRANSACTIONAL_PROPERTY: + case INSERTONLY_TRANSACTIONAL_PROPERTY: isValid = true; break; default: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index ef6473af9cf7..c54187fe86b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -678,7 +678,8 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) Utilities.copyTableJobPropertiesToConf(conf.getTableInfo(), jc); // only create bucket files only if no dynamic partitions, // buckets of dynamic partitions will be created for each newly created partition - if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || + conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) { fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx], reporter); // If the record writer provides stats, get it from there instead of the serde @@ -821,7 +822,8 @@ public void process(Object row, int tag) throws HiveException { // for a given operator branch prediction should work quite nicely on it. // RecordUpdateer expects to get the actual row, not a serialized version of it. Thus we // pass the row rather than recordValue. 
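The FileSinkOperator hunks that follow all repeat one idea: wherever the operator branches on the write type, INSERT_ONLY is grouped with NOT_ACID, so insert-only writes go through plain record writers rather than ACID RecordUpdaters. A minimal sketch of that predicate (the helper class and method name are hypothetical, not part of the patch):

    import org.apache.hadoop.hive.ql.io.AcidUtils;

    final class WriteTypeDispatch {
      // Plain RecordWriters serve non-ACID and insert-only writes; only full
      // ACID INSERT/UPDATE/DELETE goes through RecordUpdaters.
      static boolean usesPlainRecordWriter(AcidUtils.Operation writeType) {
        return writeType == AcidUtils.Operation.NOT_ACID
            || writeType == AcidUtils.Operation.INSERT_ONLY;
      }
    }
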
-        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
+        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
+            conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
           rowOutWriters[writerOffset].write(recordValue);
         } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) {
           fpaths.updaters[writerOffset].insert(conf.getTransactionId(), row);
@@ -865,7 +867,8 @@ public void process(Object row, int tag) throws HiveException {
   protected boolean areAllTrue(boolean[] statsFromRW) {
     // If we are doing an acid operation they will always all be true as RecordUpdaters always
     // collect stats
-    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID) {
+    if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID &&
+        conf.getWriteType() != AcidUtils.Operation.INSERT_ONLY) {
       return true;
     }
     for(boolean b : statsFromRW) {
@@ -1008,7 +1011,8 @@ protected FSPaths getDynOutPaths(List row, String lbDirName) throws Hive
       // stats from the record writer and store in the previous fsp that is cached
       if (conf.isGatherStats() && isCollectRWStats) {
         SerDeStats stats = null;
-        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
+        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
+            conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
           RecordWriter outWriter = prevFsp.outWriters[0];
           if (outWriter != null) {
             stats = ((StatsProvidingRecordWriter) outWriter).getStats();
@@ -1112,7 +1116,8 @@ public void closeOp(boolean abort) throws HiveException {
       // record writer already gathers the statistics, it can simply return the
       // accumulated statistics which will be aggregated in case of spray writers
       if (conf.isGatherStats() && isCollectRWStats) {
-        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) {
+        if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID ||
+            conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) {
           for (int idx = 0; idx < fsp.outWriters.length; idx++) {
             RecordWriter outWriter = fsp.outWriters[idx];
             if (outWriter != null) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index f2b8ca382c2b..74a650d9982a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -389,7 +389,8 @@ private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd,
     db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(),
         tbd.getPartitionSpec(), tbd.getReplace(), tbd.getInheritTableSpecs(),
         isSkewedStoredAsDirs(tbd), work.isSrcLocal(),
-        work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
+        (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID &&
+         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY),
         hasFollowingStatsTask(), tbd.getMmWriteId());
 
     Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 7351bbe7efd9..ecbc21630b3b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -275,8 +275,9 @@ static long parseBase(Path path) {
     return result;
   }
 
+  // INSERT_ONLY is a special operation type for tables that support only INSERT, no UPDATE/DELETE
   public enum Operation {
-    NOT_ACID, INSERT, UPDATE, DELETE;
+    NOT_ACID, INSERT, UPDATE, DELETE, INSERT_ONLY
   }
 
   /**
@@ -344,8 +345,11 @@ public static class AcidOperationalProperties {
     public static final String SPLIT_UPDATE_STRING = "split_update";
     public static final int HASH_BASED_MERGE_BIT = 0x02;
     public static final String HASH_BASED_MERGE_STRING = "hash_merge";
+    public static final int INSERT_ONLY_BIT = 0x04; // must not overlap the other bits
+    public static final String INSERT_ONLY_STRING = "insert_only";
     public static final String DEFAULT_VALUE_STRING = TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY;
     public static final String LEGACY_VALUE_STRING = TransactionalValidationListener.LEGACY_TRANSACTIONAL_PROPERTY;
+    public static final String INSERTONLY_VALUE_STRING = TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY;
 
     private AcidOperationalProperties() {
     }
@@ -373,6 +377,17 @@ public static AcidOperationalProperties getDefault() {
       return obj;
     }
 
+    /**
+     * Returns an acidOperationalProperties object for tables that use the ACID framework but
+     * only support the INSERT operation, and do not require ORC or bucketing.
+     * @return the acidOperationalProperties object
+     */
+    public static AcidOperationalProperties getInsertOnly() {
+      AcidOperationalProperties obj = new AcidOperationalProperties();
+      obj.setInsertOnly(true);
+      return obj;
+    }
+
     /**
      * Returns an acidOperationalProperties object that is represented by an encoded string.
      * @param propertiesStr an encoded string representing the acidOperationalProperties.
@@ -388,6 +403,9 @@ public static AcidOperationalProperties parseString(String propertiesStr) {
       if (propertiesStr.equalsIgnoreCase(LEGACY_VALUE_STRING)) {
         return AcidOperationalProperties.getLegacy();
       }
+      if (propertiesStr.equalsIgnoreCase(INSERTONLY_VALUE_STRING)) {
+        return AcidOperationalProperties.getInsertOnly();
+      }
       AcidOperationalProperties obj = new AcidOperationalProperties();
       String[] options = propertiesStr.split("\\|");
       for (String option : options) {
@@ -448,6 +466,12 @@ public AcidOperationalProperties setHashBasedMerge(boolean isHashBasedMerge) {
       return this;
     }
 
+    public AcidOperationalProperties setInsertOnly(boolean isInsertOnly) {
+      description = (isInsertOnly
+          ? (description | INSERT_ONLY_BIT) : (description & ~INSERT_ONLY_BIT));
+      return this;
+    }
+
     public boolean isSplitUpdate() {
       return (description & SPLIT_UPDATE_BIT) > 0;
     }
@@ -456,6 +480,10 @@ public boolean isHashBasedMerge() {
       return (description & HASH_BASED_MERGE_BIT) > 0;
     }
 
+    public boolean isInsertOnly() {
+      return (description & INSERT_ONLY_BIT) > 0;
+    }
+
     public int toInt() {
       return description;
     }
@@ -469,6 +497,9 @@ public String toString() {
       if (isHashBasedMerge()) {
         str.append("|" + HASH_BASED_MERGE_STRING);
       }
+      if (isInsertOnly()) {
+        str.append("|" + INSERT_ONLY_STRING);
+      }
       return str.toString();
     }
   }
@@ -1077,6 +1108,17 @@ public static boolean isAcidTable(Table table) {
     return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
   }
 
+  /**
+   * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE.
+   * @param table table
+   * @return true if table is an INSERT_ONLY table, false otherwise
+   */
+  public static boolean isInsertOnlyTable(Table table) {
+    String transactionalProp = table.getProperty(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
+    return transactionalProp != null &&
+        AcidUtils.AcidOperationalProperties.INSERT_ONLY_STRING.equals(transactionalProp);
+  }
+
   /**
    * Sets the acidOperationalProperties in the configuration object argument.
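Note on the encoding above: description is a bitmask, so each capability needs a distinct bit (0x01 split-update, 0x02 hash merge, 0x04 insert-only); an overlapping value such as 0x03 would make isSplitUpdate() report true for insert-only tables. A standalone sketch of the string round-trip, with the flag values inlined rather than imported, and the "default" base string assumed for illustration:

    // Standalone sketch; constants mirror AcidOperationalProperties above.
    final class AcidPropsBits {
      static final int SPLIT_UPDATE = 0x01, HASH_MERGE = 0x02, INSERT_ONLY = 0x04;

      static String describe(int description) {
        StringBuilder str = new StringBuilder("default");
        if ((description & SPLIT_UPDATE) > 0) str.append("|split_update");
        if ((description & HASH_MERGE) > 0) str.append("|hash_merge");
        if ((description & INSERT_ONLY) > 0) str.append("|insert_only");
        return str.toString();
      }

      public static void main(String[] args) {
        // With distinct bits, insert-only does not imply split-update.
        System.out.println(describe(INSERT_ONLY)); // default|insert_only
      }
    }
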
* @param conf Mutable configuration object diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 62faf8959e9f..f74c0a951919 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6455,7 +6455,8 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z'); } input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(), - maxReducers, (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID)); + maxReducers, (AcidUtils.isAcidTable(dest_tab) ? + getAcidType(dest_tab, table_desc.getOutputFileFormatClass()) : AcidUtils.Operation.NOT_ACID)); reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0)); ctx.setMultiFileSpray(multiFileSpray); ctx.setNumFiles(numFiles); @@ -6588,8 +6589,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) if (!isNonNativeTable) { AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - checkAcidConstraints(qb, table_desc, dest_tab); + acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass()); + checkAcidConstraints(qb, table_desc, dest_tab, acidOp); } try { mmWriteId = getMmWriteId(dest_tab, isMmTable); @@ -6648,8 +6649,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part.isStoredAsSubDirectories(), conf); AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - checkAcidConstraints(qb, table_desc, dest_tab); + acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass()); + checkAcidConstraints(qb, table_desc, dest_tab, acidOp); } try { mmWriteId = getMmWriteId(dest_tab, isMmTable); @@ -6945,7 +6946,7 @@ private FileSinkDesc createFileSinkDesc(TableDesc table_desc, fileSinkDesc.setHiveServerQuery(SessionState.get().isHiveServerQuery()); // If this is an insert, update, or delete on an ACID table then mark that so the // FileSinkOperator knows how to properly write to it. - if (destTableIsAcid) { + if (destTableIsAcid && !AcidUtils.isInsertOnlyTable(dest_part.getTable())) { AcidUtils.Operation wt = updating() ? AcidUtils.Operation.UPDATE : (deleting() ? AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT); fileSinkDesc.setWriteType(wt); @@ -7141,7 +7142,7 @@ String fixCtasColumnName(String colName) { // This method assumes you have already decided that this is an Acid write. Don't call it if // that isn't true. private void checkAcidConstraints(QB qb, TableDesc tableDesc, - Table table) throws SemanticException { + Table table, AcidUtils.Operation acidOp) throws SemanticException { String tableName = tableDesc.getTableName(); if (!qb.getParseInfo().isInsertIntoTable(tableName)) { LOG.debug("Couldn't find table " + tableName + " in insertIntoTable"); @@ -7158,15 +7159,14 @@ These props are now enabled elsewhere (see commit diffs). 
It would be better in */ conf.set(AcidUtils.CONF_ACID_KEY, "true"); - if (table.getNumBuckets() < 1) { - throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName()); - } - if (table.getSortCols() != null && table.getSortCols().size() > 0) { - throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); + if (!Operation.NOT_ACID.equals(acidOp) && !Operation.INSERT_ONLY.equals(acidOp)) { + if (table.getNumBuckets() < 1) { + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName()); + } + if (table.getSortCols() != null && table.getSortCols().size() > 0) { + throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); + } } - - - } /** @@ -13118,9 +13118,11 @@ private AcidUtils.Operation getAcidType() { AcidUtils.Operation.INSERT); } - private AcidUtils.Operation getAcidType(Class of) { + private AcidUtils.Operation getAcidType(Table table, Class of) { if (SessionState.get() == null || !SessionState.get().getTxnMgr().supportsAcid()) { return AcidUtils.Operation.NOT_ACID; + } else if (AcidUtils.isInsertOnlyTable(table)) { + return AcidUtils.Operation.INSERT_ONLY; } else if (isAcidOutputFormat(of)) { return getAcidType(); } else { diff --git a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q new file mode 100644 index 000000000000..7da99c522d16 --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q @@ -0,0 +1,16 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + + +drop table qtr_acid; +create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10; +insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10; +explain +select * from qtr_acid order by key; +select * from qtr_acid order by key; +drop table qtr_acid; \ No newline at end of file diff --git a/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out b/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out new file mode 100644 index 000000000000..6f7d198b7aa8 --- /dev/null +++ b/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out @@ -0,0 +1,115 @@ +PREHOOK: query: drop table qtr_acid +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table qtr_acid +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@qtr_acid +POSTHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@qtr_acid +PREHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@qtr_acid@p=123 +POSTHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key 
> 0 order by key asc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@qtr_acid@p=123 +POSTHOOK: Lineage: qtr_acid PARTITION(p=123).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@qtr_acid@p=456 +POSTHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@qtr_acid@p=456 +POSTHOOK: Lineage: qtr_acid PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: explain +select * from qtr_acid order by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from qtr_acid order by key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: qtr_acid + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), p (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from qtr_acid order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@qtr_acid +PREHOOK: Input: default@qtr_acid@p=123 +PREHOOK: Input: default@qtr_acid@p=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from qtr_acid order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@qtr_acid +POSTHOOK: Input: default@qtr_acid@p=123 +POSTHOOK: Input: default@qtr_acid@p=456 +#### A masked pattern was here #### +9 456 +10 123 +11 123 +85 456 +86 456 +87 456 +90 456 +92 456 +95 456 +96 456 +97 456 +98 456 +100 123 +103 123 +104 123 +105 123 +111 123 +113 123 +114 123 +116 123 +PREHOOK: query: drop table qtr_acid +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@qtr_acid +PREHOOK: Output: default@qtr_acid +POSTHOOK: query: drop table qtr_acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@qtr_acid +POSTHOOK: Output: default@qtr_acid From 4de0d9635757fbad10dc5def295f43fb2bb18fdc Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Wed, 19 Oct 2016 11:01:25 -0700 Subject: [PATCH 14/24] HIVE-14642 : handle insert overwrite for MM tables (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 8 +- .../hadoop/hive/ql/exec/FileSinkOperator.java | 6 +- .../apache/hadoop/hive/ql/exec/MoveTask.java | 
4 +- .../apache/hadoop/hive/ql/exec/Utilities.java | 9 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 43 +- .../hive/ql/optimizer/GenMapRedUtils.java | 49 +- .../hadoop/hive/ql/plan/FileSinkDesc.java | 26 + ql/src/test/queries/clientpositive/mm_all.q | 100 ++++ ql/src/test/queries/clientpositive/mm_all2.q | 10 +- .../test/queries/clientpositive/mm_current.q | 29 +- .../results/clientpositive/llap/mm_all.q.out | 561 +++++++++++++++++- .../results/clientpositive/llap/mm_all2.q.out | 48 +- .../clientpositive/llap/mm_current.q.out | 204 +------ 13 files changed, 823 insertions(+), 274 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java index 160f4c079bab..7ef4f550c610 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -122,18 +122,18 @@ public static String getMmFilePrefix(long mmWriteId) { public static class IdPathFilter implements PathFilter { - private final String prefix, tmpPrefix; + private final String mmDirName, tmpPrefix; private final boolean isMatch; public IdPathFilter(long writeId, boolean isMatch) { - this.prefix = ValidWriteIds.getMmFilePrefix(writeId); - this.tmpPrefix = "_tmp." + prefix; + this.mmDirName = ValidWriteIds.getMmFilePrefix(writeId); + this.tmpPrefix = "_tmp." + mmDirName; this.isMatch = isMatch; } @Override public boolean accept(Path path) { String name = path.getName(); - return isMatch == (name.startsWith(prefix) || name.startsWith(tmpPrefix)); + return isMatch == (name.equals(mmDirName) || name.startsWith(tmpPrefix)); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index c54187fe86b1..22b2149c74c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -311,10 +311,14 @@ public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeT subdirPath += Path.SEPARATOR + unionPath; } subdirPath += Path.SEPARATOR + taskId; + if (conf.isMerge()) { + // Make sure we don't collide with the source files. + // MM tables don't support concat so we don't expect the merge of merged files. + subdirPath += ".merged"; + } if (!bDynParts && !isSkewedStoredAsSubDirectories) { finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension); } else { - // TODO# does this need extra special handing for bucketing? 
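The IdPathFilter change above tightens the match on the mm directory name from startsWith to equals: with prefix matching, a filter for write ID 1 would also accept mm_10. A sketch of the accept logic (the mm_<writeId> and _tmp.mm_<writeId> naming is assumed from ValidWriteIds.getMmFilePrefix):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathFilter;

    // Sketch mirroring ValidWriteIds.IdPathFilter with isMatch == true.
    final class ExactIdFilter implements PathFilter {
      private final String mmDirName, tmpPrefix;
      ExactIdFilter(long writeId) {
        this.mmDirName = "mm_" + writeId;   // assumed getMmFilePrefix format
        this.tmpPrefix = "_tmp." + mmDirName;
      }
      @Override public boolean accept(Path path) {
        String name = path.getName();
        return name.equals(mmDirName)       // "mm_1" no longer matches "mm_10"
            || name.startsWith(tmpPrefix);
      }
    }
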
        // Note: tmpPath here has the correct partition key
        finalPaths[filesIdx] = getFinalPath(subdirPath, tmpPath, extension);
      }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 74a650d9982a..02059fb421fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -258,7 +258,7 @@ public TaskInformation(Task task, String path) {
   public int execute(DriverContext driverContext) {
     Utilities.LOG14535.info("Executing MoveWork " + System.identityHashCode(work)
         + " with " + work.getLoadFileWork() + "; " + work.getLoadTableWork() + "; "
-        + work.getLoadMultiFilesWork(), new Exception());
+        + work.getLoadMultiFilesWork());
 
     try {
       if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) {
@@ -435,7 +435,7 @@ private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd,
         tbd.getPartitionSpec(),
         tbd.getReplace(),
         dpCtx.getNumDPCols(),
-        isSkewedStoredAsDirs(tbd),
+        (tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(),
         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID,
         SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(),
         work.getLoadTableWork().getWriteType(),
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index f1dad715e952..0f8384d044c6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3810,7 +3810,7 @@ private static void tryDelete(FileSystem fs, Path path) {
     }
   }
 
-  private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels,
+  public static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels,
       int lbLevels, PathFilter filter, long mmWriteId) throws IOException {
     StringBuilder sb = new StringBuilder(path.toUri().getPath());
     for (int i = 0; i < dpLevels + lbLevels; i++) {
@@ -3819,7 +3819,11 @@ private static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, i
     sb.append(Path.SEPARATOR).append(ValidWriteIds.getMmFilePrefix(mmWriteId));
     Utilities.LOG14535.info("Looking for files via: " + sb.toString());
     Path pathPattern = new Path(path, sb.toString());
+    if (filter == null) {
+      // TODO: do we need this? Likely yes; we don't want mm_10 when we use ".../mm_1" pattern.
+      filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
+    }
     return fs.globStatus(pathPattern, filter);
   }
 
   private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir,
@@ -3883,7 +3887,6 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
       Reporter reporter) throws IOException, HiveException {
     FileSystem fs = specPath.getFileSystem(hconf);
     // Manifests would be at the root level, but the results at target level.
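getMmDirectoryCandidates above builds its glob with one wildcard per dynamic-partition or list-bucketing level, then the mm_<writeId> leaf; getValidPartitionsInPath in the next hunk inverts this by stripping the mm directory (and any LB levels) to recover the partition path. A sketch of both directions, assuming the mm_<writeId> naming convention (string form only, no FileSystem call):

    import org.apache.hadoop.fs.Path;

    final class MmPathMath {
      // Pattern built for dpLevels=2, lbLevels=0 under /wh/t: /wh/t/*/*/mm_17
      static String mmGlob(String tableDir, int dpLevels, int lbLevels, long mmWriteId) {
        StringBuilder sb = new StringBuilder(tableDir);
        for (int i = 0; i < dpLevels + lbLevels; i++) {
          sb.append("/*"); // one wildcard per DP column or LB level
        }
        return sb.append("/mm_").append(mmWriteId).toString();
      }

      // Inverse: /wh/t/p=1/lb0/mm_17 with lbLevels=1 -> /wh/t/p=1
      static Path dpPathOf(Path mmDir, int lbLevels) {
        Path dpPath = mmDir.getParent(); // skip the mm directory itself
        for (int i = 0; i < lbLevels; ++i) {
          dpPath = dpPath.getParent(); // then any list-bucketing directories
        }
        return dpPath;
      }
    }
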
- // TODO# special case - doesn't take bucketing into account Path manifestDir = getManifestDir(specPath, unionSuffix); ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 8da9a8055218..4b9335086740 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1832,18 +1832,35 @@ private Map, String> constructListBucketingLocationMap(Path newPart * @return Set of valid partitions * @throws HiveException */ - private Set getValidPartitionsInPath(int numDP, Path loadPath) throws HiveException { + private Set getValidPartitionsInPath( + int numDP, int numLB, Path loadPath, Long mmWriteId) throws HiveException { Set validPartitions = new HashSet(); try { FileSystem fs = loadPath.getFileSystem(conf); - FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); + FileStatus[] leafStatus = null; + if (mmWriteId == null) { + leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); + } else { + // The non-MM path only finds new partitions, as it is looking at the temp path. + // To produce the same effect, we will find all the partitions affected by this write ID. + // TODO# how would this work with multi-insert into the same table? how does the existing one work? + leafStatus = Utilities.getMmDirectoryCandidates( + fs, loadPath, numDP, numLB, null, mmWriteId); + } // Check for empty partitions for (FileStatus s : leafStatus) { - if (!s.isDirectory()) { + if (mmWriteId == null && !s.isDirectory()) { throw new HiveException("partition " + s.getPath() + " is not a directory!"); } - Utilities.LOG14535.info("Found DP " + s.getPath()); - validPartitions.add(s.getPath()); + Path dpPath = s.getPath(); + if (mmWriteId != null) { + dpPath = dpPath.getParent(); // Skip the MM directory that we have found. + for (int i = 0; i < numLB; ++i) { + dpPath = dpPath.getParent(); // Now skip the LB directories, if any... 
+ } + } + Utilities.LOG14535.info("Found DP " + dpPath); + validPartitions.add(dpPath); } } catch (IOException e) { throw new HiveException(e); @@ -1881,7 +1898,7 @@ private Set getValidPartitionsInPath(int numDP, Path loadPath) throws Hive */ public Map, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map partSpec, final boolean replace, - final int numDP, final boolean listBucketingEnabled, final boolean isAcid, final long txnId, + final int numDP, final int numLB, final boolean isAcid, final long txnId, final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, final Long mmWriteId) throws HiveException { @@ -1897,7 +1914,7 @@ public Map, Partition> loadDynamicPartitions(final Path load // Get all valid partition paths and existing partitions for them (if any) final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, loadPath); + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, mmWriteId); final int partsToLoad = validPartitions.size(); final AtomicInteger partitionsLoaded = new AtomicInteger(0); @@ -1926,7 +1943,7 @@ public Void call() throws Exception { // load the partition Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName()); Partition newPartition = loadPartition(partPath, tbl, fullPartSpec, - replace, true, listBucketingEnabled, + replace, true, numLB > 0, false, isAcid, hasFollowingStatsTask, mmWriteId); partitionsMap.put(fullPartSpec, newPartition); @@ -1945,7 +1962,7 @@ public Void call() throws Exception { + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " replace=" + replace + ", " - + " listBucketingEnabled=" + listBucketingEnabled + ", " + + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " hasFollowingStatsTask=" + hasFollowingStatsTask, t); throw t; @@ -3475,7 +3492,13 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false); if (isOldPathUnderDestf) { FileStatus[] statuses = oldFs.listStatus(oldPath, pathFilter); - if (statuses != null && statuses.length > 0 && !trashFiles(oldFs, statuses, conf)) { + if (statuses == null || statuses.length == 0) return; + String s = "Deleting files under " + oldPath + " for replace: "; + for (FileStatus file : statuses) { + s += file.getPath().getName() + ", "; + } + Utilities.LOG14535.info(s); + if (!trashFiles(oldFs, statuses, conf)) { throw new HiveException("Destination directory " + destPath + " has not been cleaned up."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 0b5d56b2b41d..f0b5738e9aa5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -1257,7 +1257,6 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, List> mvTasks, HiveConf conf, Task currTask) throws SemanticException { - // // 1. create the operator tree // @@ -1265,20 +1264,29 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, Utilities.LOG14535.info("Creating merge work from " + System.identityHashCode(fsInput) + " with write ID " + (fsInputDesc.isMmTable() ? 
fsInputDesc.getMmWriteId() : null) + " into " + finalName); - // Create a TableScan operator - RowSchema inputRS = fsInput.getSchema(); - TableScanOperator tsMerge = GenMapRedUtils.createTemporaryTableScanOperator( - fsInput.getCompilationOpContext(), inputRS); + boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && + fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || + (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && + fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)); + RowSchema inputRS = fsInput.getSchema(); Long srcMmWriteId = fsInputDesc.isMmTable() ? fsInputDesc.getMmWriteId() : null; - - // Create a FileSink operator - TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); - FileSinkDesc fsOutputDesc = new FileSinkDesc( - finalName, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); - fsOutputDesc.setMmWriteId(srcMmWriteId); - // Create and attach the filesink for the merge. We don't actually need it for anything here. - OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge); + FileSinkDesc fsOutputDesc = null; + TableScanOperator tsMerge = null; + if (!isBlockMerge) { + // Create a TableScan operator + tsMerge = GenMapRedUtils.createTemporaryTableScanOperator( + fsInput.getCompilationOpContext(), inputRS); + + // Create a FileSink operator + TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); + Path mergeDest = srcMmWriteId == null ? finalName : finalName.getParent(); + fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); + fsOutputDesc.setMmWriteId(srcMmWriteId); + fsOutputDesc.setIsMerge(true); + // Create and attach the filesink for the merge. + OperatorFactory.getAndMakeChild(fsOutputDesc, inputRS, tsMerge); + } // If the input FileSinkOperator is a dynamic partition enabled, the tsMerge input schema // needs to include the partition column, and the fsOutput should have @@ -1296,9 +1304,11 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, } inputRS.setSignature(signature); + if (!isBlockMerge) { // create another DynamicPartitionCtx, which has a different input-to-DP column mapping - DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx); - fsOutputDesc.setDynPartCtx(dpCtx2); + DynamicPartitionCtx dpCtx2 = new DynamicPartitionCtx(dpCtx); + fsOutputDesc.setDynPartCtx(dpCtx2); + } // update the FileSinkOperator to include partition columns usePartitionColumns(fsInputDesc.getTableInfo().getProperties(), dpCtx.getDPColNames()); @@ -1315,11 +1325,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, MapWork cplan; Serializable work; - if ((conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && - fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || - (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && - fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class))) { - + if (isBlockMerge) { cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext()); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { @@ -1375,8 +1381,7 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // MM directory, the original MoveTask still commits based on the parent. Note that this path // can only be triggered for a merge that's part of insert for now; MM tables do not support // concatenate. 
Keeping the old logic for non-MM tables with temp directories and stuff. - Path fsopPath = srcMmWriteId != null - ? fsInputDesc.getFinalDirName() : fsOutputDesc.getFinalDirName(); + Path fsopPath = srcMmWriteId != null ? fsInputDesc.getFinalDirName() : finalName; linkMoveTask(fsopPath, cndTsk, mvTasks, conf, dependencyTask); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index f6331f238fdc..83f2c92eea30 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -97,6 +97,7 @@ public enum DPSortState { private Path destPath; private boolean isHiveServerQuery; private Long mmWriteId; + private boolean isMerge; public FileSinkDesc() { } @@ -157,6 +158,7 @@ public Object clone() throws CloneNotSupportedException { ret.setWriteType(writeType); ret.setTransactionId(txnId); ret.setStatsTmpDir(statsTmpDir); + ret.setIsMerge(isMerge); return ret; } @@ -490,4 +492,28 @@ public void setStatsTmpDir(String statsCollectionTempDir) { public void setMmWriteId(Long mmWriteId) { this.mmWriteId = mmWriteId; } + + public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization { + + public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) { + // Native vectorization not supported. + super(vectorDesc, false); + } + } + + @Explain(vectorization = Vectorization.OPERATOR, displayName = "File Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED }) + public FileSinkOperatorExplainVectorization getFileSinkVectorization() { + if (vectorDesc == null) { + return null; + } + return new FileSinkOperatorExplainVectorization(vectorDesc); + } + + public void setIsMerge(boolean b) { + this.isMerge = b; + } + + public boolean isMerge() { + return isMerge; + } } diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index bdda5f5de71f..90523d333b09 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -152,6 +152,19 @@ select * from merge0_mm; drop table merge0_mm; +create table merge2_mm (id int) tblproperties('hivecommit'='true'); + +insert into table merge2_mm select key from intermediate; +select * from merge2_mm; + +set tez.grouping.split-count=1; +insert into table merge2_mm select key from intermediate; +set tez.grouping.split-count=0; +select * from merge2_mm; + +drop table merge2_mm; + + create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true'); insert into table merge1_mm partition (key) select key, key from intermediate; @@ -179,4 +192,91 @@ select * from ctas1_mm; drop table ctas1_mm; +set hive.merge.tezfiles=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +drop table iow0_mm; +create table iow0_mm(key int) tblproperties('hivecommit'='true'); +insert overwrite table iow0_mm select key from intermediate; +insert into table iow0_mm select key + 1 from intermediate; +select * from iow0_mm; +insert overwrite table iow0_mm select key + 2 from intermediate; +select * from iow0_mm; +drop table iow0_mm; + + +drop table iow1_mm; +create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true'); +insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate; +insert into table iow1_mm partition (key2) +select key + 1 as k1, 
key from intermediate union all select key as k1, key from intermediate; +select * from iow1_mm; +insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate; +select * from iow1_mm; +insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate; +select * from iow1_mm; +drop table iow1_mm; + + + +-- TODO# future +-- +--create table load_overwrite (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load_overwrite'; +--create table load_overwrite2 (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load2_overwrite2'; +-- +--load data local inpath '../../data/files/kv1.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv2.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv3.txt' into table load_overwrite; +-- +--show table extended like load_overwrite; +--desc extended load_overwrite; +--select count(*) from load_overwrite; +-- +--load data inpath '${system:test.tmp.dir}/load_overwrite/kv*.txt' overwrite into table load_overwrite2; +-- +-- +--load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') +-- +-- +-- +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +--dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp; +--dfs -rmr target/tmp/ql/test/data/exports/exim_department; +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--create table exim_department ( dep_id int) stored as textfile; +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +--dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/test; +--dfs -rmr target/tmp/ql/test/data/exports/exim_department; +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; + + + +-- TODO multi-insert + + + drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_all2.q b/ql/src/test/queries/clientpositive/mm_all2.q index a1d2301908a4..c4f305876957 100644 --- a/ql/src/test/queries/clientpositive/mm_all2.q +++ b/ql/src/test/queries/clientpositive/mm_all2.q @@ -6,6 +6,8 @@ set tez.grouping.max-size=2; set hive.exec.dynamic.partition.mode=nonstrict; +-- Bucketing tests are slow and some tablesample ones don't work w/o MM + -- Force multiple writers when reading drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; @@ -59,12 +61,4 @@ select * from bucket2_mm tablesample (bucket 1 out of 10) s; select * from bucket2_mm tablesample (bucket 4 out of 10) s; drop table bucket2_mm; - - --- TODO# future - - - --- TODO load, multi-insert, buckets - drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_current.q 
b/ql/src/test/queries/clientpositive/mm_current.q index 44445dd835c0..688492886790 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -10,21 +10,20 @@ drop table intermediate; create table intermediate(key int) partitioned by (p int) stored as orc; insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; - - -drop table bucket1_mm; -create table bucket1_mm(key int, id int) partitioned by (key2 int) -clustered by (key) sorted by (key) into 2 buckets -tblproperties('hivecommit'='true'); -insert into table bucket1_mm partition (key2) -select key + 1, key, key - 1 from intermediate -union all -select key - 1, key, key + 1 from intermediate; -select * from bucket1_mm; -select * from bucket1_mm tablesample (bucket 1 out of 2) s; -select * from bucket1_mm tablesample (bucket 2 out of 2) s; -drop table bucket1_mm; + + +set hive.merge.tezfiles=true; +set hive.merge.mapfiles=true; +set hive.merge.mapredfiles=true; +set hive.merge.orcfile.stripe.level=true; + + +drop table merge2_mm; +create table merge2_mm(key int) tblproperties('hivecommit'='true'); +insert overwrite table merge2_mm select key from intermediate; +select * from merge2_mm; +drop table merge2_mm; + diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 4061e5bb2bdc..92f32e5d5e29 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -951,6 +951,86 @@ POSTHOOK: query: drop table merge0_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@merge0_mm POSTHOOK: Output: default@merge0_mm +PREHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge2_mm +PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge2_mm +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: 
default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge2_mm +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +98 +97 +100 +103 +0 +10 +PREHOOK: query: drop table merge2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge2_mm +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: drop table merge2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge2_mm +POSTHOOK: Output: default@merge2_mm PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -1177,11 +1257,488 @@ POSTHOOK: query: drop table ctas1_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@ctas1_mm POSTHOOK: Output: default@ctas1_mm -PREHOOK: query: drop table intermediate +PREHOOK: query: drop table iow0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table iow0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@iow0_mm +PREHOOK: query: insert overwrite table iow0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm +POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow0_mm +PREHOOK: 
type: QUERY +PREHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from iow0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +99 +98 +101 +104 +1 +11 +PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from iow0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +100 +99 +102 +105 +2 +12 +PREHOOK: query: drop table iow0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@iow0_mm +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: drop table iow0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@iow0_mm +POSTHOOK: Output: default@iow0_mm +PREHOOK: query: drop table iow1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table iow1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@iow1_mm +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm 
PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table iow1_mm partition (key2) +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert into table iow1_mm partition (key2) +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from iow1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +0 0 +0 0 +1 0 +0 0 +10 10 +10 10 +11 10 +10 10 +100 100 +100 100 +101 100 +100 100 +103 103 +103 103 +104 103 +103 103 +97 97 +97 97 +98 97 +97 97 +98 98 +98 98 +99 98 +98 98 +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm 
+POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from iow1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +3 0 +4 0 +13 10 +14 10 +103 100 +104 100 +106 103 +107 103 +100 97 +101 97 +101 98 +102 98 +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=101 +POSTHOOK: Output: default@iow1_mm@key2=102 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=105 +POSTHOOK: Output: default@iow1_mm@key2=106 +POSTHOOK: Output: default@iow1_mm@key2=12 +POSTHOOK: Output: default@iow1_mm@key2=13 +POSTHOOK: Output: default@iow1_mm@key2=2 +POSTHOOK: Output: default@iow1_mm@key2=3 +POSTHOOK: Output: 
default@iow1_mm@key2=99 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=101 +PREHOOK: Input: default@iow1_mm@key2=102 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=105 +PREHOOK: Input: default@iow1_mm@key2=106 +PREHOOK: Input: default@iow1_mm@key2=12 +PREHOOK: Input: default@iow1_mm@key2=13 +PREHOOK: Input: default@iow1_mm@key2=2 +PREHOOK: Input: default@iow1_mm@key2=3 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: default@iow1_mm@key2=98 +PREHOOK: Input: default@iow1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from iow1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=101 +POSTHOOK: Input: default@iow1_mm@key2=102 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=105 +POSTHOOK: Input: default@iow1_mm@key2=106 +POSTHOOK: Input: default@iow1_mm@key2=12 +POSTHOOK: Input: default@iow1_mm@key2=13 +POSTHOOK: Input: default@iow1_mm@key2=2 +POSTHOOK: Input: default@iow1_mm@key2=3 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +POSTHOOK: Input: default@iow1_mm@key2=99 +#### A masked pattern was here #### +3 0 +4 0 +13 10 +14 10 +100 100 +100 100 +101 101 +102 102 +103 103 +105 105 +106 106 +12 12 +13 13 +2 2 +3 3 +100 97 +101 97 +101 98 +102 98 +99 99 +PREHOOK: query: drop table iow1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@iow1_mm +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: drop table iow1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Output: default@iow1_mm +PREHOOK: query: -- TODO# future +-- +#### A masked pattern was here 
#### +-- +--load data local inpath '../../data/files/kv1.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv2.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv3.txt' into table load_overwrite; +-- +--show table extended like load_overwrite; +--desc extended load_overwrite; +--select count(*) from load_overwrite; +-- +#### A masked pattern was here #### +-- +-- +--load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') +-- +-- +-- +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +#### A masked pattern was here #### +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--create table exim_department ( dep_id int) stored as textfile; +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +#### A masked pattern was here #### +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; + + + +-- TODO multi-insert + + + +drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: drop table intermediate +POSTHOOK: query: -- TODO# future +-- +#### A masked pattern was here #### +-- +--load data local inpath '../../data/files/kv1.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv2.txt' into table load_overwrite; +--load data local inpath '../../data/files/kv3.txt' into table load_overwrite; +-- +--show table extended like load_overwrite; +--desc extended load_overwrite; +--select count(*) from load_overwrite; +-- +#### A masked pattern was here #### +-- +-- +--load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') +-- +-- +-- +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +#### A masked pattern was here #### +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--create table exim_department ( dep_id int) stored as textfile; +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; +-- +-- +--create table exim_department ( dep_id int) stored as textfile; +--load data local inpath "../../data/files/test.dat" into table exim_department; +#### A masked pattern was here #### +--export table exim_department to 'ql/test/data/exports/exim_department'; +--drop table exim_department; +-- +--create database importer; +--use importer; +-- +--set hive.security.authorization.enabled=true; +--import from 'ql/test/data/exports/exim_department'; + + + +-- TODO multi-insert + + + +drop table intermediate POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@intermediate POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_all2.q.out 
b/ql/src/test/results/clientpositive/llap/mm_all2.q.out index 95ce33ae3d36..3921c7df640e 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all2.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all2.q.out @@ -1,7 +1,11 @@ -PREHOOK: query: -- Force multiple writers when reading +PREHOOK: query: -- Bucketing tests are slow and some tablesample ones don't work w/o MM + +-- Force multiple writers when reading drop table intermediate PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Force multiple writers when reading +POSTHOOK: query: -- Bucketing tests are slow and some tablesample ones don't work w/o MM + +-- Force multiple writers when reading drop table intermediate POSTHOOK: type: DROPTABLE PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc @@ -79,12 +83,12 @@ POSTHOOK: query: select * from bucket0_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -100 100 +98 98 0 0 10 10 -98 98 -103 103 +100 100 97 97 +103 103 PREHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s PREHOOK: type: QUERY PREHOOK: Input: default@bucket0_mm @@ -93,10 +97,10 @@ POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -100 100 +98 98 0 0 10 10 -98 98 +100 100 PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s PREHOOK: type: QUERY PREHOOK: Input: default@bucket0_mm @@ -105,8 +109,8 @@ POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -103 103 97 97 +103 103 PREHOOK: query: insert into table bucket0_mm select key, key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate @@ -131,12 +135,12 @@ POSTHOOK: query: select * from bucket0_mm POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -100 100 +98 98 0 0 10 10 -98 98 -103 103 +100 100 97 97 +103 103 98 98 0 0 10 10 @@ -151,10 +155,10 @@ POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -100 100 +98 98 0 0 10 10 -98 98 +100 100 98 98 0 0 10 10 @@ -167,8 +171,8 @@ POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket0_mm #### A masked pattern was here #### -103 103 97 97 +103 103 97 97 103 103 PREHOOK: query: drop table bucket0_mm @@ -481,23 +485,11 @@ POSTHOOK: query: drop table bucket2_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@bucket2_mm POSTHOOK: Output: default@bucket2_mm -PREHOOK: query: -- TODO# future - - - --- TODO load, multi-insert, buckets - -drop table intermediate +PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO# future - - - --- TODO load, multi-insert, buckets - -drop table intermediate +POSTHOOK: query: drop table intermediate POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@intermediate POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 1bbef9d70b00..934a5fb0f0b6 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ 
b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,205 +28,51 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: default@intermediate@p=457 -POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: default@intermediate@p=457 -POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: drop table bucket1_mm +PREHOOK: query: drop table merge2_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table bucket1_mm +POSTHOOK: query: drop table merge2_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) -clustered by (key) sorted by (key) into 2 buckets -tblproperties('hivecommit'='true') +PREHOOK: query: create table merge2_mm(key int) tblproperties('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@bucket1_mm -POSTHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) -clustered by (key) sorted by (key) into 2 buckets -tblproperties('hivecommit'='true') +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: create table merge2_mm(key int) tblproperties('hivecommit'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@bucket1_mm -PREHOOK: query: insert into table bucket1_mm partition (key2) -select key + 1, key, key - 1 from intermediate -union all -select key - 1, key, key + 1 from intermediate +POSTHOOK: Output: default@merge2_mm +PREHOOK: query: insert overwrite table merge2_mm select key from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Input: default@intermediate@p=457 -PREHOOK: Output: default@bucket1_mm -POSTHOOK: query: insert into table bucket1_mm partition (key2) -select key + 1, key, key - 1 from intermediate -union all -select key - 1, key, key + 1 from intermediate +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: insert overwrite table merge2_mm select key from intermediate POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Input: default@intermediate@p=457 -POSTHOOK: Output: default@bucket1_mm@key2=-1 -POSTHOOK: Output: default@bucket1_mm@key2=1 -POSTHOOK: Output: default@bucket1_mm@key2=101 -POSTHOOK: Output: default@bucket1_mm@key2=102 -POSTHOOK: Output: default@bucket1_mm@key2=104 -POSTHOOK: Output: default@bucket1_mm@key2=11 -POSTHOOK: Output: default@bucket1_mm@key2=9 -POSTHOOK: Output: default@bucket1_mm@key2=96 -POSTHOOK: Output: default@bucket1_mm@key2=97 -POSTHOOK: Output: default@bucket1_mm@key2=98 -POSTHOOK: Output: default@bucket1_mm@key2=99 -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key 
EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from bucket1_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@bucket1_mm -PREHOOK: Input: default@bucket1_mm@key2=-1 -PREHOOK: Input: default@bucket1_mm@key2=1 -PREHOOK: Input: default@bucket1_mm@key2=101 -PREHOOK: Input: default@bucket1_mm@key2=102 -PREHOOK: Input: default@bucket1_mm@key2=104 -PREHOOK: Input: default@bucket1_mm@key2=11 -PREHOOK: Input: default@bucket1_mm@key2=9 -PREHOOK: Input: default@bucket1_mm@key2=96 -PREHOOK: Input: default@bucket1_mm@key2=97 -PREHOOK: Input: default@bucket1_mm@key2=98 -PREHOOK: Input: default@bucket1_mm@key2=99 -#### A masked pattern was here #### -POSTHOOK: 
query: select * from bucket1_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket1_mm -POSTHOOK: Input: default@bucket1_mm@key2=-1 -POSTHOOK: Input: default@bucket1_mm@key2=1 -POSTHOOK: Input: default@bucket1_mm@key2=101 -POSTHOOK: Input: default@bucket1_mm@key2=102 -POSTHOOK: Input: default@bucket1_mm@key2=104 -POSTHOOK: Input: default@bucket1_mm@key2=11 -POSTHOOK: Input: default@bucket1_mm@key2=9 -POSTHOOK: Input: default@bucket1_mm@key2=96 -POSTHOOK: Input: default@bucket1_mm@key2=97 -POSTHOOK: Input: default@bucket1_mm@key2=98 -POSTHOOK: Input: default@bucket1_mm@key2=99 -#### A masked pattern was here #### -1 0 -1 --1 0 1 -99 100 101 -104 103 102 -102 103 104 -9 10 11 -11 10 9 -98 97 96 -99 98 97 -96 97 98 -97 98 99 -101 100 99 -PREHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s -PREHOOK: type: QUERY -PREHOOK: Input: default@bucket1_mm -PREHOOK: Input: default@bucket1_mm@key2=-1 -PREHOOK: Input: default@bucket1_mm@key2=1 -PREHOOK: Input: default@bucket1_mm@key2=101 -PREHOOK: Input: default@bucket1_mm@key2=102 -PREHOOK: Input: default@bucket1_mm@key2=104 -PREHOOK: Input: default@bucket1_mm@key2=11 -PREHOOK: Input: default@bucket1_mm@key2=9 -PREHOOK: Input: default@bucket1_mm@key2=96 -PREHOOK: Input: default@bucket1_mm@key2=97 -PREHOOK: Input: default@bucket1_mm@key2=98 -PREHOOK: Input: default@bucket1_mm@key2=99 -#### A masked pattern was here #### -POSTHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s -POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket1_mm -POSTHOOK: Input: default@bucket1_mm@key2=-1 -POSTHOOK: Input: default@bucket1_mm@key2=1 -POSTHOOK: Input: default@bucket1_mm@key2=101 -POSTHOOK: Input: default@bucket1_mm@key2=102 -POSTHOOK: Input: default@bucket1_mm@key2=104 -POSTHOOK: Input: default@bucket1_mm@key2=11 -POSTHOOK: Input: default@bucket1_mm@key2=9 -POSTHOOK: Input: default@bucket1_mm@key2=96 -POSTHOOK: Input: default@bucket1_mm@key2=97 -POSTHOOK: Input: default@bucket1_mm@key2=98 -POSTHOOK: Input: default@bucket1_mm@key2=99 -#### A masked pattern was here #### -104 103 102 -102 103 104 -98 97 96 -96 97 98 -PREHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +POSTHOOK: Output: default@merge2_mm +POSTHOOK: Lineage: merge2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge2_mm PREHOOK: type: QUERY -PREHOOK: Input: default@bucket1_mm -PREHOOK: Input: default@bucket1_mm@key2=-1 -PREHOOK: Input: default@bucket1_mm@key2=1 -PREHOOK: Input: default@bucket1_mm@key2=101 -PREHOOK: Input: default@bucket1_mm@key2=102 -PREHOOK: Input: default@bucket1_mm@key2=104 -PREHOOK: Input: default@bucket1_mm@key2=11 -PREHOOK: Input: default@bucket1_mm@key2=9 -PREHOOK: Input: default@bucket1_mm@key2=96 -PREHOOK: Input: default@bucket1_mm@key2=97 -PREHOOK: Input: default@bucket1_mm@key2=98 -PREHOOK: Input: default@bucket1_mm@key2=99 +PREHOOK: Input: default@merge2_mm #### A masked pattern was here #### -POSTHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +POSTHOOK: query: select * from merge2_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@bucket1_mm -POSTHOOK: Input: default@bucket1_mm@key2=-1 -POSTHOOK: Input: default@bucket1_mm@key2=1 -POSTHOOK: Input: default@bucket1_mm@key2=101 -POSTHOOK: Input: default@bucket1_mm@key2=102 -POSTHOOK: Input: default@bucket1_mm@key2=104 -POSTHOOK: Input: default@bucket1_mm@key2=11 -POSTHOOK: Input: default@bucket1_mm@key2=9 -POSTHOOK: Input: default@bucket1_mm@key2=96 
-POSTHOOK: Input: default@bucket1_mm@key2=97 -POSTHOOK: Input: default@bucket1_mm@key2=98 -POSTHOOK: Input: default@bucket1_mm@key2=99 +POSTHOOK: Input: default@merge2_mm #### A masked pattern was here #### -1 0 -1 --1 0 1 -99 100 101 -9 10 11 -11 10 9 -99 98 97 -97 98 99 -101 100 99 -PREHOOK: query: drop table bucket1_mm +98 +97 +0 +10 +PREHOOK: query: drop table merge2_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@bucket1_mm -PREHOOK: Output: default@bucket1_mm -POSTHOOK: query: drop table bucket1_mm +PREHOOK: Input: default@merge2_mm +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: drop table merge2_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@bucket1_mm -POSTHOOK: Output: default@bucket1_mm +POSTHOOK: Input: default@merge2_mm +POSTHOOK: Output: default@merge2_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From a057e12cfeff59a6955d58e3bc4107a204b88ea2 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Wed, 19 Oct 2016 11:13:02 -0700 Subject: [PATCH 15/24] HIVE-14996 : handle load for MM tables (Sergey Shelukhin) --- .../apache/hadoop/hive/ql/exec/MoveTask.java | 2 +- .../hive/ql/exec/OrcFileMergeOperator.java | 2 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 73 ++-- .../hive/ql/parse/ImportSemanticAnalyzer.java | 7 +- .../hive/ql/parse/LoadSemanticAnalyzer.java | 15 +- .../hadoop/hive/ql/plan/FileSinkDesc.java | 16 - .../hadoop/hive/ql/plan/LoadFileDesc.java | 2 +- .../hadoop/hive/ql/plan/LoadTableDesc.java | 6 +- ql/src/test/queries/clientpositive/mm_all.q | 86 +++-- .../test/queries/clientpositive/mm_current.q | 51 ++- .../results/clientpositive/llap/mm_all.q.out | 354 +++++++++++++++--- .../clientpositive/llap/mm_current.q.out | 313 ++++++++++++++-- 12 files changed, 753 insertions(+), 174 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 02059fb421fe..eea435710a13 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -325,7 +325,7 @@ public int execute(DriverContext driverContext) { DataContainer dc = null; if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); - Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName(), new Exception()); + Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName()); db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(), tbd.getMmWriteId()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java index 835791b86397..a845b5022749 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java @@ -94,7 +94,7 @@ private void processKeyValuePairs(Object key, Object value) filePath = k.getInputPath().toUri().getPath(); - Utilities.LOG14535.info("OrcFileMergeOperator processing " + filePath, new Exception()); + Utilities.LOG14535.info("OrcFileMergeOperator processing " + filePath); fixTmpPath(k.getInputPath().getParent()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 
4b9335086740..d6dc2d3c5b8c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1588,22 +1588,31 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par
     List newFiles = null;
     PerfLogger perfLogger = SessionState.getPerfLogger();
     perfLogger.PerfLogBegin("MoveTask", "FileMoves");
-    if (mmWriteId != null) {
-      Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath);
+    if (mmWriteId != null && loadPath.equals(newPartPath)) {
+      // MM insert query; the move itself is a no-op.
+      Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath + " (MM)");
       assert !isAcid;
       if (areEventsForDmlNeeded(tbl, oldPart)) {
         newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
       }
       Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace");
-      if (replace && oldPartPath != null) { // TODO# is this correct? ignore until iow jira
-        deleteOldPathForReplace(newPartPath, oldPartPath,
-            getConf(), new ValidWriteIds.IdPathFilter(mmWriteId, false));
+      if (replace && oldPartPath != null) {
+        deleteOldPathForReplace(newPartPath, oldPartPath, getConf(),
+            new ValidWriteIds.IdPathFilter(mmWriteId, false), mmWriteId != null);
       }
     } else {
-      Utilities.LOG14535.info("moving " + loadPath + " to " + newPartPath);
+      // Either a non-MM query, or a load into an MM table from an external source.
+      PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
+      Path destPath = newPartPath;
+      if (mmWriteId != null) {
+        // We will load into the MM directory, and delete from the parent if needed.
+        destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
+        filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false) : filter;
+      }
+      Utilities.LOG14535.info("moving " + loadPath + " to " + destPath);
       if (replace || (oldPart == null && !isAcid)) {
-        replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(),
-            isSrcLocal);
+        replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(),
+            isSrcLocal, filter, mmWriteId != null);
       } else {
         if (areEventsForDmlNeeded(tbl, oldPart)) {
           newFiles = Collections.synchronizedList(new ArrayList());
@@ -2038,28 +2047,35 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList());
     }
-    if (mmWriteId == null) {
-      Utilities.LOG14535.info("moving " + loadPath + " to " + tbl.getPath());
+    if (mmWriteId != null && loadPath.equals(tbl.getPath())) {
+      Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
       if (replace) {
         Path tableDest = tbl.getPath();
-        replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal);
+        deleteOldPathForReplace(tableDest, tableDest, sessionConf,
+            new ValidWriteIds.IdPathFilter(mmWriteId, false), mmWriteId != null);
+      }
+      newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
+    } else {
+      // Either a non-MM query, or a load into an MM table from an external source.
+      Path tblPath = tbl.getPath(), destPath = tblPath;
+      PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER;
+      if (mmWriteId != null) {
+        // We will load into the MM directory, and delete from the parent if needed.
+        destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId));
+        filter = replace ? new ValidWriteIds.IdPathFilter(mmWriteId, false) : filter;
+      }
+      Utilities.LOG14535.info("moving " + loadPath + " to " + tblPath);
+      if (replace) {
+        replaceFiles(tblPath, loadPath, destPath, tblPath,
+            sessionConf, isSrcLocal, filter, mmWriteId != null);
       } else {
-        FileSystem fs;
         try {
-          fs = tbl.getDataLocation().getFileSystem(sessionConf);
-          copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles);
+          FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
+          copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles);
        } catch (IOException e) {
          throw new HiveException("addFiles: filesystem error in check phase", e);
        }
      }
-    } else {
-      Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath());
-      if (replace) {
-        Path tableDest = tbl.getPath();
-        deleteOldPathForReplace(tableDest, tableDest, sessionConf,
-            new ValidWriteIds.IdPathFilter(mmWriteId, false));
-      }
-      newFiles = listFilesCreatedByQuery(loadPath, mmWriteId);
     }
     if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
       StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
@@ -3423,7 +3439,7 @@ private static void moveAcidDeltaFiles(String deltaFileType, PathFilter pathFilt
    * If the source directory is LOCAL
    */
   protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf,
-      boolean isSrcLocal) throws HiveException {
+      boolean isSrcLocal, PathFilter deletePathFilter, boolean isMmTable) throws HiveException {
     try {
       FileSystem destFs = destf.getFileSystem(conf);
@@ -3442,10 +3458,9 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath,
       }
       if (oldPath != null) {
-        deleteOldPathForReplace(destf, oldPath, conf, FileUtils.HIDDEN_FILES_PATH_FILTER);
+        deleteOldPathForReplace(destf, oldPath, conf, deletePathFilter, isMmTable);
       }
-      // TODO# what are the paths that use this? MM tables will need to do this beforehand
       // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates
       // destf with inherited permissions
       boolean inheritPerms = HiveConf.getBoolVar(conf, HiveConf.ConfVars
@@ -3478,9 +3493,9 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath,
     }
   }
-  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
-      PathFilter pathFilter) throws HiveException {
+  private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
+      PathFilter pathFilter, boolean isMmTable) throws HiveException {
+    Utilities.LOG14535.info("Deleting old paths for replace in " + destPath + " and old path " + oldPath);
     boolean isOldPathUnderDestf = false;
     try {
       FileSystem oldFs = oldPath.getFileSystem(conf);
@@ -3490,7 +3505,7 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf,
       // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is
       // not the destf or its subdir?
isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false); - if (isOldPathUnderDestf) { + if (isOldPathUnderDestf || isMmTable) { FileStatus[] statuses = oldFs.listStatus(oldPath, pathFilter); if (statuses == null || statuses.length == 0) return; String s = "Deleting files under " + oldPath + " for replace: "; @@ -3504,7 +3519,7 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, } } } catch (IOException e) { - if (isOldPathUnderDestf) { + if (isOldPathUnderDestf || isMmTable) { // if oldPath is a subdir of destf but it could not be cleaned throw new HiveException("Directory " + oldPath.toString() + " could not be cleaned up.", e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index a3603783247d..043de2fcbab1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -323,10 +323,10 @@ private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPat Path tmpPath = ctx.getExternalTmpPath(tgtPath); Task copyTask = TaskFactory.get(new CopyWork(dataPath, tmpPath, false), conf); + // TODO# we assume mm=false here LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), new TreeMap(), - replace); - // TODO# movetask is created here; handle MM tables + replace, null); Task loadTableTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false), conf); copyTask.addDependentTask(loadTableTask); @@ -397,9 +397,10 @@ private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc t tmpPath, false), conf); Task addPartTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf); + // TODO# we assume mm=false here LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, Utilities.getTableDesc(table), - partSpec.getPartSpec(), true); + partSpec.getPartSpec(), true, null); loadTableWork.setInheritTableSpecs(false); // TODO# movetask is created here; handle MM tables Task loadPartTask = TaskFactory.get(new MoveWork( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 96311c1ccef5..e38b0f772700 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -46,6 +47,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.StatsWork; @@ -259,10 +261,20 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } + // TODO# movetask is created here; handle MM tables + Long mmWriteId = null; + Table tbl = ts.tableHandle; + if (MetaStoreUtils.isMmTable(tbl.getParameters())) { + try { + mmWriteId 
= db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); + } catch (HiveException e) { + throw new SemanticException(e); + } + } LoadTableDesc loadTableWork; loadTableWork = new LoadTableDesc(new Path(fromURI), - Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite); + Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite, mmWriteId); if (preservePartitionSpecs){ // Note : preservePartitionSpecs=true implies inheritTableSpecs=false but // but preservePartitionSpecs=false(default) here is not sufficient enough @@ -270,7 +282,6 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { loadTableWork.setInheritTableSpecs(false); } - // TODO# movetask is created here; handle MM tables Task childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal), conf); if (rTask != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 83f2c92eea30..1f84531ff43f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -493,22 +493,6 @@ public void setMmWriteId(Long mmWriteId) { this.mmWriteId = mmWriteId; } - public class FileSinkOperatorExplainVectorization extends OperatorExplainVectorization { - - public FileSinkOperatorExplainVectorization(VectorDesc vectorDesc) { - // Native vectorization not supported. - super(vectorDesc, false); - } - } - - @Explain(vectorization = Vectorization.OPERATOR, displayName = "File Sink Vectorization", explainLevels = { Level.DEFAULT, Level.EXTENDED }) - public FileSinkOperatorExplainVectorization getFileSinkVectorization() { - if (vectorDesc == null) { - return null; - } - return new FileSinkOperatorExplainVectorization(vectorDesc); - } - public void setIsMerge(boolean b) { this.isMerge = b; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 7670ef247d62..072148c70c09 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -60,7 +60,7 @@ public LoadFileDesc(final Path sourcePath, final Path targetDir, final boolean isDfsDir, final String columns, final String columnTypes) { super(sourcePath); - Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir, new Exception()); + Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir); this.targetDir = targetDir; this.isDfsDir = isDfsDir; this.columns = columns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index fc8726c977dd..bf858b6cfd54 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -68,9 +68,9 @@ private LoadTableDesc(final Path sourcePath, public LoadTableDesc(final Path sourcePath, final TableDesc table, final Map partitionSpec, - final boolean replace) { - // TODO# we assume mm=false here - this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, null); + final boolean replace, + final Long mmWriteId) { + this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, mmWriteId); } public LoadTableDesc(final Path sourcePath, diff --git a/ql/src/test/queries/clientpositive/mm_all.q 
b/ql/src/test/queries/clientpositive/mm_all.q index 90523d333b09..5377568461b5 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -177,6 +177,10 @@ select * from merge1_mm; drop table merge1_mm; +set hive.merge.tezfiles=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + -- TODO: need to include merge+union+DP, but it's broken for now @@ -192,9 +196,6 @@ select * from ctas1_mm; drop table ctas1_mm; -set hive.merge.tezfiles=false; -set hive.merge.mapfiles=false; -set hive.merge.mapredfiles=false; drop table iow0_mm; create table iow0_mm(key int) tblproperties('hivecommit'='true'); @@ -223,40 +224,65 @@ drop table iow1_mm; + +drop table load0_mm; +create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm; +select count(1) from load0_mm; +drop table load0_mm; + + +drop table intermediate2; +create table intermediate2 (key string, value string) stored as textfile +location 'file:${system:test.tmp.dir}/intermediate2'; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; + +drop table load1_mm; +create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' overwrite into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' overwrite into table load1_mm; +select count(1) from load1_mm; +drop table load1_mm; + +drop table load2_mm; +create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' into table load2_mm partition(k=5, l=5); +select count(1) from load2_mm; +drop table load2_mm; +drop table intermediate2; + + +-- IMPORT + + + -- TODO# future -- ---create table load_overwrite (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load_overwrite'; ---create table load_overwrite2 (key string, value string) stored as textfile location 'file:${system:test.tmp.dir}/load2_overwrite2'; --- ---load data local inpath '../../data/files/kv1.txt' into table load_overwrite; ---load data local inpath '../../data/files/kv2.txt' into table 
load_overwrite; ---load data local inpath '../../data/files/kv3.txt' into table load_overwrite; --- ---show table extended like load_overwrite; ---desc extended load_overwrite; ---select count(*) from load_overwrite; --- ---load data inpath '${system:test.tmp.dir}/load_overwrite/kv*.txt' overwrite into table load_overwrite2; --- --- ---load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') --- --- --- --- --- --create table exim_department ( dep_id int) stored as textfile; ---load data local inpath "../../data/files/test.dat" into table exim_department; ---dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/temp; --dfs -rmr target/tmp/ql/test/data/exports/exim_department; --export table exim_department to 'ql/test/data/exports/exim_department'; --drop table exim_department; --- --create database importer; --use importer; --- --create table exim_department ( dep_id int) stored as textfile; ---set hive.security.authorization.enabled=true; --import from 'ql/test/data/exports/exim_department'; -- -- @@ -275,8 +301,8 @@ drop table iow1_mm; --- TODO multi-insert +-- TODO multi-insert, truncate -drop table intermediate; \ No newline at end of file +drop table intermediate; diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index 688492886790..391017becafd 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -12,17 +12,50 @@ insert into table intermediate partition(p='455') select distinct key from src w insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -set hive.merge.tezfiles=true; -set hive.merge.mapfiles=true; -set hive.merge.mapredfiles=true; -set hive.merge.orcfile.stripe.level=true; +drop table load0_mm; +create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm; +select count(1) from load0_mm; +drop table load0_mm; -drop table merge2_mm; -create table merge2_mm(key int) tblproperties('hivecommit'='true'); -insert overwrite table merge2_mm select key from intermediate; -select * from merge2_mm; -drop table merge2_mm; + +drop table intermediate2; +create table intermediate2 (key string, value string) stored as textfile +location 'file:${system:test.tmp.dir}/intermediate2'; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; + +drop table load1_mm; +create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 
'file:${system:test.tmp.dir}/intermediate2/kv*.txt' overwrite into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' overwrite into table load1_mm; +select count(1) from load1_mm; +drop table load1_mm; + +drop table load2_mm; +create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' into table load2_mm partition(k=5, l=5); +select count(1) from load2_mm; +drop table load2_mm; +drop table intermediate2; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 92f32e5d5e29..656936971d62 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -1628,38 +1628,315 @@ POSTHOOK: query: drop table iow1_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@iow1_mm POSTHOOK: Output: default@iow1_mm -PREHOOK: query: -- TODO# future --- +PREHOOK: query: drop table load0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load0_mm +POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load0_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +PREHOOK: type: LOAD #### A masked pattern was here #### --- ---load data local inpath '../../data/files/kv1.txt' into table load_overwrite; ---load data local inpath '../../data/files/kv2.txt' into table load_overwrite; ---load data local inpath '../../data/files/kv3.txt' into table load_overwrite; --- ---show table extended like load_overwrite; ---desc extended load_overwrite; ---select count(*) from load_overwrite; --- +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +POSTHOOK: type: LOAD #### A masked pattern was here #### --- --- ---load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') --- --- --- --- +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY 
+PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: drop table load0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load0_mm +PREHOOK: Output: default@load0_mm +POSTHOOK: query: drop table load0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load0_mm +POSTHOOK: Output: default@load0_mm +PREHOOK: query: drop table intermediate2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: drop table load1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load1_mm +POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load1_mm 
+#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1050 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +500 +PREHOOK: query: drop table load1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load1_mm +PREHOOK: 
Output: default@load1_mm +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load1_mm +POSTHOOK: Output: default@load1_mm +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load2_mm +POSTHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load2_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load2_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load2_mm +POSTHOOK: Output: default@load2_mm@k=5/l=5 +PREHOOK: query: select count(1) from load2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load2_mm +PREHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load2_mm +POSTHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +1025 +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load2_mm +PREHOOK: Output: default@load2_mm +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load2_mm +POSTHOOK: Output: default@load2_mm +PREHOOK: query: drop table intermediate2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate2 +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: drop table intermediate2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate2 +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: -- IMPORT + + + +-- TODO# future -- --create table exim_department ( dep_id int) stored as textfile; ---load data local inpath "../../data/files/test.dat" into table exim_department; #### A masked pattern was here #### --export table exim_department to 'ql/test/data/exports/exim_department'; --drop table exim_department; --- --create database 
importer; --use importer; --- --create table exim_department ( dep_id int) stored as textfile; ---set hive.security.authorization.enabled=true; --import from 'ql/test/data/exports/exim_department'; -- -- @@ -1677,7 +1954,7 @@ PREHOOK: query: -- TODO# future --- TODO multi-insert +-- TODO multi-insert, truncate @@ -1685,38 +1962,19 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO# future --- -#### A masked pattern was here #### --- ---load data local inpath '../../data/files/kv1.txt' into table load_overwrite; ---load data local inpath '../../data/files/kv2.txt' into table load_overwrite; ---load data local inpath '../../data/files/kv3.txt' into table load_overwrite; --- ---show table extended like load_overwrite; ---desc extended load_overwrite; ---select count(*) from load_overwrite; --- -#### A masked pattern was here #### --- --- ---load data local inpath '../../data/files/orc_split_elim.orc' into table orc_test partition (ds='10') --- --- --- --- +POSTHOOK: query: -- IMPORT + + + +-- TODO# future -- --create table exim_department ( dep_id int) stored as textfile; ---load data local inpath "../../data/files/test.dat" into table exim_department; #### A masked pattern was here #### --export table exim_department to 'ql/test/data/exports/exim_department'; --drop table exim_department; --- --create database importer; --use importer; --- --create table exim_department ( dep_id int) stored as textfile; ---set hive.security.authorization.enabled=true; --import from 'ql/test/data/exports/exim_department'; -- -- @@ -1734,7 +1992,7 @@ POSTHOOK: query: -- TODO# future --- TODO multi-insert +-- TODO multi-insert, truncate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 934a5fb0f0b6..7ccc2ee4b66c 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,51 +28,302 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: drop table merge2_mm +PREHOOK: query: drop table load0_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table merge2_mm +POSTHOOK: query: drop table load0_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table merge2_mm(key int) tblproperties('hivecommit'='true') +PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@merge2_mm -POSTHOOK: query: create table merge2_mm(key int) tblproperties('hivecommit'='true') +PREHOOK: Output: default@load0_mm +POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@merge2_mm -PREHOOK: query: insert overwrite table merge2_mm select key from intermediate +POSTHOOK: Output: default@load0_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +POSTHOOK: type: LOAD +#### A masked 
pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@merge2_mm -POSTHOOK: query: insert overwrite table merge2_mm select key from intermediate +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@merge2_mm -POSTHOOK: Lineage: merge2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from merge2_mm +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: drop table load0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load0_mm +PREHOOK: Output: default@load0_mm +POSTHOOK: query: drop table load0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load0_mm +POSTHOOK: Output: default@load0_mm +PREHOOK: query: drop table intermediate2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: 
default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: drop table load1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load1_mm +POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load1_mm +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: 
Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1050 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm PREHOOK: type: QUERY -PREHOOK: Input: default@merge2_mm +PREHOOK: Input: default@load1_mm #### A masked pattern was here #### -POSTHOOK: query: select * from merge2_mm +POSTHOOK: query: select count(1) from load1_mm POSTHOOK: type: QUERY -POSTHOOK: Input: default@merge2_mm +POSTHOOK: Input: default@load1_mm #### A masked pattern was here #### -98 -97 -0 -10 -PREHOOK: query: drop table merge2_mm +500 +PREHOOK: query: drop table load1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load1_mm +PREHOOK: Output: default@load1_mm +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load1_mm +POSTHOOK: Output: default@load1_mm +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load2_mm +POSTHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load2_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### 
+PREHOOK: Output: default@load2_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load2_mm +POSTHOOK: Output: default@load2_mm@k=5/l=5 +PREHOOK: query: select count(1) from load2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load2_mm +PREHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load2_mm +POSTHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +1025 +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load2_mm +PREHOOK: Output: default@load2_mm +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load2_mm +POSTHOOK: Output: default@load2_mm +PREHOOK: query: drop table intermediate2 PREHOOK: type: DROPTABLE -PREHOOK: Input: default@merge2_mm -PREHOOK: Output: default@merge2_mm -POSTHOOK: query: drop table merge2_mm +PREHOOK: Input: default@intermediate2 +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: drop table intermediate2 POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@merge2_mm -POSTHOOK: Output: default@merge2_mm +POSTHOOK: Input: default@intermediate2 +POSTHOOK: Output: default@intermediate2 PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From 8004f71e50c682a664215e04e2b387195c5932da Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Thu, 20 Oct 2016 15:05:24 -0700 Subject: [PATCH 16/24] HIVE-15019 : handle import for MM tables (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 15 +- .../org/apache/hadoop/hive/conf/HiveConf.java | 2 + .../hadoop/hive/metastore/HiveMetaStore.java | 3 + .../apache/hadoop/hive/ql/exec/CopyTask.java | 48 +- .../apache/hadoop/hive/ql/exec/DDLTask.java | 30 +- .../ql/exec/DependencyCollectionTask.java | 1 - .../hadoop/hive/ql/exec/ImportCommitTask.java | 65 ++ .../hadoop/hive/ql/exec/ImportCommitWork.java | 48 ++ .../apache/hadoop/hive/ql/exec/MoveTask.java | 10 +- .../hadoop/hive/ql/exec/TaskFactory.java | 2 + .../apache/hadoop/hive/ql/metadata/Hive.java | 7 +- .../hive/ql/parse/DDLSemanticAnalyzer.java | 8 +- .../apache/hadoop/hive/ql/parse/EximUtil.java | 6 +- .../hive/ql/parse/ImportSemanticAnalyzer.java | 206 +++--- .../hive/ql/parse/LoadSemanticAnalyzer.java | 1 - .../hive/ql/parse/SemanticAnalyzer.java | 2 +- .../hadoop/hive/ql/parse/TaskCompiler.java | 2 +- .../apache/hadoop/hive/ql/plan/CopyWork.java | 9 + .../hadoop/hive/ql/plan/CreateTableDesc.java | 14 +- .../hadoop/hive/ql/plan/LoadTableDesc.java | 14 +- ql/src/test/queries/clientpositive/mm_all.q | 119 +++- .../test/queries/clientpositive/mm_current.q | 83 ++- .../results/clientpositive/llap/mm_all.q.out | 586 ++++++++++++++++-- .../clientpositive/llap/mm_current.q.out | 447 ++++++------- 24 files changed, 1209 insertions(+), 519 deletions(-) create mode 100644 ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java create mode 100644 ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java index 7ef4f550c610..df0278cd3780 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -137,7 +137,20 @@ public boolean accept(Path path) { } } - + public static 
class AnyIdDirFilter implements PathFilter { + @Override + public boolean accept(Path path) { + String name = path.getName(); + if (!name.startsWith(MM_PREFIX + "_")) return false; + String idStr = name.substring(MM_PREFIX.length() + 1); + try { + Long.parseLong(idStr); + } catch (NumberFormatException ex) { + return false; + } + return true; + } + } public static Long extractWriteId(Path file) { String fileName = file.getName(); String[] parts = fileName.split("_", 3); diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index ccc29f85150c..8a00f07500dd 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1209,6 +1209,8 @@ public static enum ConfVars { HIVETESTMODE("hive.test.mode", false, "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.", false), + HIVEEXIMTESTMODE("hive.exim.test.mode", false, + "The subset of test mode that only enables custom path handling for ExIm.", false), HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_", "In test mode, specfies prefixes for the output table", false), HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32, diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 9f16eb26fbc4..4436f3a56fab 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -6570,6 +6570,9 @@ public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) private MTableWrite getActiveTableWrite(RawStore ms, String dbName, String tblName, long writeId) throws MetaException { MTableWrite tw = ms.getTableWrite(dbName, tblName, writeId); + if (tw == null) { + return null; + } assert tw.getState().length() == 1; char state = tw.getState().charAt(0); if (state != MM_WRITE_OPEN) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java index cbe0aca142da..a8a44bcd67d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java @@ -18,14 +18,20 @@ package org.apache.hadoop.hive.ql.exec; +import java.io.FileNotFoundException; +import java.io.IOException; import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; @@ -37,7 +43,6 @@ * CopyTask implementation. 
**/ public class CopyTask extends Task<CopyWork> implements Serializable { - private static final long serialVersionUID = 1L; private static transient final Logger LOG = LoggerFactory.getLogger(CopyTask.class); @@ -60,7 +65,7 @@ public int execute(DriverContext driverContext) { FileSystem srcFs = fromPath.getFileSystem(conf); dstFs = toPath.getFileSystem(conf); - FileStatus[] srcs = LoadSemanticAnalyzer.matchFilesOrDir(srcFs, fromPath); + FileStatus[] srcs = matchFilesOrDir(srcFs, fromPath, work.isSourceMm()); if (srcs == null || srcs.length == 0) { if (work.isErrorOnSrcEmpty()) { console.printError("No files matching path: " + fromPath.toString()); @@ -97,6 +102,45 @@ public int execute(DriverContext driverContext) { } } + // Note: initially copied from LoadSemanticAnalyzer. + private static FileStatus[] matchFilesOrDir( + FileSystem fs, Path path, boolean isSourceMm) throws IOException { + if (!isSourceMm) return matchFilesOneDir(fs, path, null); + // TODO: this doesn't handle list bucketing properly. Does the original exim do that? + FileStatus[] mmDirs = fs.listStatus(path, new ValidWriteIds.AnyIdDirFilter()); + if (mmDirs == null || mmDirs.length == 0) return null; + List<FileStatus> allFiles = new ArrayList<>(); + for (FileStatus mmDir : mmDirs) { + Utilities.LOG14535.info("Found source MM directory " + mmDir.getPath()); + matchFilesOneDir(fs, mmDir.getPath(), allFiles); + } + return allFiles.toArray(new FileStatus[allFiles.size()]); + } + + private static FileStatus[] matchFilesOneDir( + FileSystem fs, Path path, List<FileStatus> result) throws IOException { + FileStatus[] srcs = fs.globStatus(path, new EximPathFilter()); + if (srcs != null && srcs.length == 1) { + if (srcs[0].isDirectory()) { + srcs = fs.listStatus(srcs[0].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); + } + } + if (result != null && srcs != null) { + for (int i = 0; i < srcs.length; ++i) { + result.add(srcs[i]); + } + } + return srcs; + } + + private static final class EximPathFilter implements PathFilter { + @Override + public boolean accept(Path p) { + String name = p.getName(); + return name.equals("_metadata") ? true : !name.startsWith("_") && !name.startsWith("."); + } + } + @Override public StageType getType() { return StageType.COPY; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index bb9eaf5254aa..1f89f2746052 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -4045,7 +4045,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { tbl.getDataLocation()); // create the table - if (crtTbl.getReplaceMode()){ + if (crtTbl.getReplaceMode()) { // replace-mode creates are really alters using CreateTableDesc. try { db.alterTable(tbl.getDbName()+"."+tbl.getTableName(),tbl,null); @@ -4059,28 +4059,36 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { } else { db.createTable(tbl, crtTbl.getIfNotExists()); } - if (crtTbl.isCTAS()) { + Long mmWriteId = crtTbl.getInitialMmWriteId(); + if (crtTbl.isCTAS() || mmWriteId != null) { Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName()); - if (crtTbl.getInitialWriteId() != null) { + if (mmWriteId != null) { // TODO# this would be retrieved via ACID before the query runs; for now we rely on it // being zero at start; we can't create a write ID before we create the table here.
long initialWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); - if (initialWriteId != crtTbl.getInitialWriteId()) { - throw new HiveException("Initial write ID mismatch - expected " - + crtTbl.getInitialWriteId() + " but got " + initialWriteId); + if (initialWriteId != mmWriteId) { + throw new HiveException("Initial write ID mismatch - expected " + mmWriteId + + " but got " + initialWriteId); + } + // CTAS creates the table on a directory that already exists; import creates the table + // first (in parallel with copies?), then commits after all the loads. + if (crtTbl.isCTAS()) { + db.commitMmTableWrite(tbl, initialWriteId); } - db.commitMmTableWrite(tbl, initialWriteId); } - DataContainer dc = new DataContainer(createdTable.getTTable()); - SessionState.get().getLineageState().setLineage( - createdTable.getPath(), dc, createdTable.getCols() - ); + if (crtTbl.isCTAS()) { + DataContainer dc = new DataContainer(createdTable.getTTable()); + SessionState.get().getLineageState().setLineage( + createdTable.getPath(), dc, createdTable.getCols() + ); + } } } work.getOutputs().add(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); return 0; } + /** * Create a new table like an existing table. * diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java index 9189cfc6f302..e6395724f683 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java @@ -20,7 +20,6 @@ import java.io.Serializable; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.api.StageType; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java new file mode 100644 index 000000000000..efa9bc37dca3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; +import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.util.StringUtils; + +public class ImportCommitTask extends Task<ImportCommitWork> { + + private static final long serialVersionUID = 1L; + + public ImportCommitTask() { + super(); + } + + @Override + public int execute(DriverContext driverContext) { + Utilities.LOG14535.info("Executing ImportCommit for " + work.getMmWriteId()); + + try { + if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { + return 0; + } + Hive db = getHive(); + Table tbl = db.getTable(work.getDbName(), work.getTblName()); + db.commitMmTableWrite(tbl, work.getMmWriteId()); + return 0; + } catch (Exception e) { + console.printError("Failed with exception " + e.getMessage(), "\n" + + StringUtils.stringifyException(e)); + setException(e); + return 1; + } + } + + @Override + public StageType getType() { + return StageType.MOVE; // The commit for import is normally done as part of MoveTask. + } + + @Override + public String getName() { + return "IMPORT_COMMIT"; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java new file mode 100644 index 000000000000..f62d23718dc8 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hive.ql.exec; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Import Commit", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ImportCommitWork implements Serializable { + private static final long serialVersionUID = 1L; + private String dbName, tblName; + private long mmWriteId; + + public ImportCommitWork(String dbName, String tblName, long mmWriteId) { + this.mmWriteId = mmWriteId; + this.dbName = dbName; + this.tblName = tblName; + } + + public long getMmWriteId() { + return mmWriteId; + } + + public String getDbName() { + return dbName; + } + + public String getTblName() { + return tblName; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index eea435710a13..99c52fad2f07 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -326,6 +326,10 @@ public int execute(DriverContext driverContext) { if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName()); + if (tbd.isMmTable() && !tbd.isCommitMmWrite()) { + throw new HiveException( + "Only single-partition LoadTableDesc can skip committing write ID"); + } db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(), tbd.getMmWriteId()); @@ -386,12 +390,13 @@ private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, db.validatePartitionNameCharacters(partVals); Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName()); + boolean isCommitMmWrite = tbd.isCommitMmWrite(); db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getPartitionSpec(), tbd.getReplace(), tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), (work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY), - hasFollowingStatsTask(), tbd.getMmWriteId()); + hasFollowingStatsTask(), tbd.getMmWriteId(), isCommitMmWrite); Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); if (ti.bucketCols != null || ti.sortCols != null) { @@ -428,6 +433,9 @@ private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd, // iterate over it and call loadPartition() here. // The reason we don't do inside HIVE-1361 is the latter is large and we // want to isolate any potential issue it may introduce.
+ if (tbd.isMmTable() && !tbd.isCommitMmWrite()) { + throw new HiveException("Only single-partition LoadTableDesc can skip committing write ID"); + } Map<Map<String, String>, Partition> dp = db.loadDynamicPartitions( tbd.getSourcePath(), diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 14fd61a99e62..822ff41c2c96 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -100,6 +100,8 @@ public TaskTuple(Class<T> workClass, Class<? extends Task<T>> taskClass) { MergeFileTask.class)); taskvec.add(new TaskTuple<DependencyCollectionWork>(DependencyCollectionWork.class, DependencyCollectionTask.class)); + taskvec.add(new TaskTuple<ImportCommitWork>(ImportCommitWork.class, + ImportCommitTask.class)); taskvec.add(new TaskTuple<PartialScanWork>(PartialScanWork.class, PartialScanTask.class)); taskvec.add(new TaskTuple<IndexMetadataChangeWork>(IndexMetadataChangeWork.class, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index d6dc2d3c5b8c..30b22d70eb2f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1502,14 +1502,15 @@ public Database getDatabaseCurrent() throws HiveException { public void loadSinglePartition(Path loadPath, String tableName, Map<String, String> partSpec, boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid, - boolean hasFollowingStatsTask, Long mmWriteId) throws HiveException { + boolean hasFollowingStatsTask, Long mmWriteId, boolean isCommitMmWrite) + throws HiveException { Table tbl = getTable(tableName); boolean isMmTableWrite = (mmWriteId != null); Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isMmTable(tbl.getParameters())); loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs, isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId); - if (isMmTableWrite) { - // The assumption behind committing here is that this partition is the only one outputted + if (isMmTableWrite && isCommitMmWrite) { + // The assumption behind committing here is that this partition is the only one outputted. commitMmTableWrite(tbl, mmWriteId); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index 03c2e79815c9..a018b5432f01 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -1060,10 +1060,10 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { // so the operation is atomic. Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); truncateTblDesc.setOutputDir(queryTmpdir); + // TODO# movetask is created here; handle MM tables LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap<String, String>() : partSpec); + partSpec == null ? new HashMap<String, String>() : partSpec, null); ltd.setLbCtx(lbCtx); - // TODO# movetask is created here; handle MM tables Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); truncateTask.addDependentTask(moveTsk); @@ -1677,10 +1677,10 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, TableDesc tblDesc = Utilities.getTableDesc(tblObj); Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); mergeDesc.setOutputDir(queryTmpdir); + // No need to handle MM tables - unsupported path.
LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap() : partSpec); + partSpec == null ? new HashMap() : partSpec, null); ltd.setLbCtx(lbCtx); - // No need to handle MM tables - unsupported path. Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mergeTask.addDependentTask(moveTsk); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 167f7a59fe8f..3826d9ff51a9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -74,7 +74,8 @@ private EximUtil() { */ static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); URI uri = new Path(dcPath).toUri(); String scheme = uri.getScheme(); String authority = uri.getAuthority(); @@ -136,7 +137,8 @@ static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws } public static String relativeToAbsolutePath(HiveConf conf, String location) throws SemanticException { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); if (testMode) { URI uri = new Path(location).toUri(); String scheme = uri.getScheme(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 043de2fcbab1..2a525e70064c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -38,7 +38,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; @@ -48,6 +50,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ImportCommitWork; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -121,6 +124,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } // get partition metadata if partition specified if (child.getChildCount() == 2) { + @SuppressWarnings("unused") // TODO: wtf? ASTNode partspec = (ASTNode) child.getChild(1); isPartSpecSet = true; parsePartitionSpec(child, parsedPartSpec); @@ -158,9 +162,13 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { // Create table associated with the import // Executed if relevant, and used to contain all the other details about the table if not. 
- CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname,rv.getTable()); + CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable()); + boolean isSourceMm = MetaStoreUtils.isMmTable(tblDesc.getTblProps()); - if (isExternalSet){ + if (isExternalSet) { + if (isSourceMm) { + throw new SemanticException("Cannot import an MM table as external"); + } tblDesc.setExternal(isExternalSet); // This condition-check could have been avoided, but to honour the old // default of not calling if it wasn't set, we retain that behaviour. @@ -219,21 +227,32 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Table table = tableIfExists(tblDesc); if (table != null){ - checkTable(table, tblDesc,replicationSpec); + checkTable(table, tblDesc, replicationSpec); LOG.debug("table " + tblDesc.getTableName() + " exists: metadata checked"); tableExists = true; } + Long mmWriteId = null; + if (table != null && MetaStoreUtils.isMmTable(table.getParameters())) { + mmWriteId = db.getNextTableWriteId(table.getDbName(), table.getTableName()); + } else if (table == null && isSourceMm) { + // We could import everything as is - directories and IDs, but that won't work with ACID + // txn ids in future. So, let's import everything into the new MM directory with ID == 0. + mmWriteId = 0l; + } + if (mmWriteId != null) { + tblDesc.setInitialMmWriteId(mmWriteId); + } if (!replicationSpec.isInReplicationScope()){ createRegularImportTasks( rootTasks, tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table, - fromURI, fs, wh); + fromURI, fs, wh, mmWriteId, isSourceMm); } else { createReplImportTasks( rootTasks, tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table, - fromURI, fs, wh); + fromURI, fs, wh, mmWriteId, isSourceMm); } } catch (SemanticException e) { throw e; @@ -318,45 +337,42 @@ private CreateTableDesc getBaseCreateTableDescFromTable(String dbName, return tblDesc; } - private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath) { + private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, + Long mmWriteId, boolean isSourceMm) { Path dataPath = new Path(fromURI.toString(), "data"); - Path tmpPath = ctx.getExternalTmpPath(tgtPath); - Task copyTask = TaskFactory.get(new CopyWork(dataPath, - tmpPath, false), conf); - // TODO# we assume mm=false here - LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, - Utilities.getTableDesc(table), new TreeMap(), - replace, null); - Task loadTableTask = TaskFactory.get(new MoveWork(getInputs(), - getOutputs(), loadTableWork, null, false), conf); + Path destPath = mmWriteId == null ? ctx.getExternalTmpPath(tgtPath) + : new Path(tgtPath, ValidWriteIds.getMmFilePrefix(mmWriteId)); + Utilities.LOG14535.info("adding import work for table with source location: " + + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm " + + mmWriteId + " (src " + isSourceMm + ") for " + (table == null ? 
"a new table" : table.getTableName())); + + CopyWork cv = new CopyWork(dataPath, destPath, false); + cv.setIsSourceMm(isSourceMm); + LoadTableDesc loadTableWork = new LoadTableDesc(destPath, + Utilities.getTableDesc(table), new TreeMap(), replace, mmWriteId); + MoveWork mv = new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false); + @SuppressWarnings("unchecked") + Task loadTableTask = TaskFactory.get(mv, conf), copyTask = TaskFactory.get(cv, conf); copyTask.addDependentTask(loadTableTask); rootTasks.add(copyTask); return loadTableTask; } + @SuppressWarnings("unchecked") private Task createTableTask(CreateTableDesc tableDesc){ - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - tableDesc - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tableDesc), conf); } + @SuppressWarnings("unchecked") private Task dropTableTask(Table table){ - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - new DropTableDesc(table.getTableName(), null, true, true, null) - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + new DropTableDesc(table.getTableName(), null, true, true, null)), conf); } + @SuppressWarnings("unchecked") private Task alterTableTask(CreateTableDesc tableDesc) { tableDesc.setReplaceMode(true); - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - tableDesc - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tableDesc), conf); } private Task alterSinglePartition( @@ -365,50 +381,54 @@ private Task alterSinglePartition( ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn) { addPartitionDesc.setReplaceMode(true); addPartitionDesc.getPartition(0).setLocation(ptn.getLocation()); // use existing location - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - addPartitionDesc - ), conf); + @SuppressWarnings("unchecked") + Task r = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf); + return r; } private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc tblDesc, - Table table, Warehouse wh, - AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec) + Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, + ReplicationSpec replicationSpec, Long mmWriteId, boolean isSourceMm, Task commitTask) throws MetaException, IOException, HiveException { AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0); if (tblDesc.isExternal() && tblDesc.getLocation() == null) { LOG.debug("Importing in-place: adding AddPart for partition " + partSpecToString(partSpec.getPartSpec())); // addPartitionDesc already has the right partition location + @SuppressWarnings("unchecked") Task addPartTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf); return addPartTask; } else { String srcLocation = partSpec.getLocation(); fixLocationInPartSpec(fs, tblDesc, table, wh, replicationSpec, partSpec); - LOG.debug("adding dependent CopyWork/AddPart/MoveWork for partition " - + partSpecToString(partSpec.getPartSpec()) - + " with source location: " + srcLocation); Path tgtLocation = new Path(partSpec.getLocation()); - Path tmpPath = ctx.getExternalTmpPath(tgtLocation); - Task copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), - tmpPath, false), conf); - Task addPartTask = TaskFactory.get(new DDLWork(getInputs(), - getOutputs(), addPartitionDesc), conf); - // TODO# we assume mm=false here - LoadTableDesc loadTableWork = new 
LoadTableDesc(tmpPath, - Utilities.getTableDesc(table), - partSpec.getPartSpec(), true, null); + Path destPath = mmWriteId == null ? ctx.getExternalTmpPath(tgtLocation) + : new Path(tgtLocation, ValidWriteIds.getMmFilePrefix(mmWriteId)); + Path moveTaskSrc = mmWriteId == null ? destPath : tgtLocation; + Utilities.LOG14535.info("adding import work for partition with source location: " + + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm " + + mmWriteId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec())); + CopyWork cw = new CopyWork(new Path(srcLocation), destPath, false); + cw.setIsSourceMm(isSourceMm); + DDLWork dw = new DDLWork(getInputs(), getOutputs(), addPartitionDesc); + LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table), + partSpec.getPartSpec(), true, mmWriteId); loadTableWork.setInheritTableSpecs(false); - // TODO# movetask is created here; handle MM tables - Task loadPartTask = TaskFactory.get(new MoveWork( - getInputs(), getOutputs(), loadTableWork, null, false), - conf); + // Do not commit the write ID from each task; need to commit once. + // TODO: we should just change the import to use a single MoveTask, like dynparts. + loadTableWork.setIntermediateInMmWrite(mmWriteId != null); + MoveWork mv = new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false); + @SuppressWarnings("unchecked") + Task copyTask = TaskFactory.get(cw, conf), addPartTask = TaskFactory.get(dw, conf), + loadPartTask = TaskFactory.get(mv, conf); copyTask.addDependentTask(loadPartTask); addPartTask.addDependentTask(loadPartTask); rootTasks.add(copyTask); + if (commitTask != null) { + loadPartTask.addDependentTask(commitTask); + } return addPartTask; } } @@ -572,13 +592,11 @@ private void checkTable(Table table, CreateTableDesc tableDesc, ReplicationSpec Class replaced = HiveFileFormatUtils .getOutputFormatSubstitute(origin); if (replaced == null) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); + throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); } importedofc = replaced.getCanonicalName(); } catch(Exception e) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); + throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); } if ((!existingifc.equals(importedifc)) || (!existingofc.equals(importedofc))) { @@ -685,43 +703,42 @@ private static String checkParams(Map<String, String> map1, /** * Create tasks for regular import, no repl complexity */ - private void createRegularImportTasks( - List<Task<? extends Serializable>> rootTasks, - CreateTableDesc tblDesc, - List<AddPartitionDesc> partitionDescs, - boolean isPartSpecSet, - ReplicationSpec replicationSpec, - Table table, URI fromURI, FileSystem fs, Warehouse wh) + private void createRegularImportTasks(List<Task<? extends Serializable>> rootTasks, + CreateTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet, + ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, + Long mmWriteId, boolean isSourceMm) throws HiveException, URISyntaxException, IOException, MetaException { - if (table != null){ + if (table != null) { if (table.isPartitioned()) { LOG.debug("table partitioned"); + Task ict = createImportCommitTask(table.getDbName(), table.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec(); org.apache.hadoop.hive.ql.metadata.Partition ptn = null; if ((ptn = db.getPartition(table, partSpec, false)) == null) { -
rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } else { throw new SemanticException( ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec))); } } - } else { LOG.debug("table non-partitioned"); // ensure if destination is not empty only for regular import Path tgtPath = new Path(table.getDataLocation().toString()); FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), conf); checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec); - loadTable(fromURI, table, false, tgtPath); + loadTable(fromURI, table, false, tgtPath, mmWriteId, isSourceMm); } // Set this to read because we can't overwrite any existing partitions outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK)); } else { LOG.debug("table " + tblDesc.getTableName() + " does not exist"); + @SuppressWarnings("unchecked") Task t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tblDesc), conf); table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName()); Database parentDb = db.getDatabase(tblDesc.getDatabaseName()); @@ -731,9 +748,11 @@ private void createRegularImportTasks( outputs.add(new WriteEntity(parentDb, WriteEntity.WriteType.DDL_SHARED)); if (isPartitioned(tblDesc)) { + Task ict = createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - t.addDependentTask( - addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } } else { LOG.debug("adding dependent CopyWork/MoveWork for table"); @@ -750,24 +769,30 @@ private void createRegularImportTasks( } FileSystem tgtFs = FileSystem.get(tablePath.toUri(), conf); checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec); - t.addDependentTask(loadTable(fromURI, table, false, tablePath)); + t.addDependentTask(loadTable(fromURI, table, false, tablePath, mmWriteId, isSourceMm)); } } rootTasks.add(t); } } + private Task createImportCommitTask(String dbName, String tblName, Long mmWriteId) { + @SuppressWarnings("unchecked") + Task ict = (mmWriteId == null) ? 
null : TaskFactory.get( + new ImportCommitWork(dbName, tblName, mmWriteId), conf); + return ict; + } + /** * Create tasks for repl import */ - private void createReplImportTasks( - List<Task<? extends Serializable>> rootTasks, - CreateTableDesc tblDesc, - List<AddPartitionDesc> partitionDescs, - boolean isPartSpecSet, ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh) + private void createReplImportTasks(List<Task<? extends Serializable>> rootTasks, + CreateTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet, + ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, + Long mmWriteId, boolean isSourceMm) throws HiveException, URISyntaxException, IOException, MetaException { - Task dr = null; + Task dr = null; WriteEntity.WriteType lockType = WriteEntity.WriteType.DDL_NO_LOCK; if ((table != null) && (isPartitioned(tblDesc) != table.isPartitioned())){ @@ -810,18 +835,21 @@ private void createReplImportTasks( lockType = WriteEntity.WriteType.DDL_SHARED; } - Task t = createTableTask(tblDesc); + Task t = createTableTask(tblDesc); table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName()); if (!replicationSpec.isMetadataOnly()) { if (isPartitioned(tblDesc)) { + Task ict = createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - t.addDependentTask( - addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, mmWriteId, isSourceMm, ict)); } } else { LOG.debug("adding dependent CopyWork/MoveWork for table"); - t.addDependentTask(loadTable(fromURI, table, true, new Path(tblDesc.getLocation()))); + t.addDependentTask(loadTable( + fromURI, table, true, new Path(tblDesc.getLocation()), mmWriteId, isSourceMm)); } } if (dr == null){ @@ -837,22 +865,25 @@ private void createReplImportTasks( if (table.isPartitioned()) { LOG.debug("table partitioned"); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec(); org.apache.hadoop.hive.ql.metadata.Partition ptn = null; - + Task ict = replicationSpec.isMetadataOnly() ? null : createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); if ((ptn = db.getPartition(table, partSpec, false)) == null) { if (!replicationSpec.isMetadataOnly()){ - rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } } else { // If replicating, then the partition already existing means we need to replace, maybe, if // the destination ptn's repl.last.id is older than the replacement's.
if (replicationSpec.allowReplacementInto(ptn)){ if (!replicationSpec.isMetadataOnly()){ - rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, mmWriteId, isSourceMm, ict)); } else { - rootTasks.add(alterSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn)); + rootTasks.add(alterSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, ptn)); } if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){ lockType = WriteEntity.WriteType.DDL_SHARED; @@ -876,7 +907,8 @@ private void createReplImportTasks( return; // silently return, table is newer than our replacement. } if (!replicationSpec.isMetadataOnly()) { - loadTable(fromURI, table, true, new Path(fromURI)); // repl-imports are replace-into + // repl-imports are replace-into + loadTable(fromURI, table, true, new Path(fromURI), mmWriteId, isSourceMm); } else { rootTasks.add(alterTableTask(tblDesc)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index e38b0f772700..26274f56b22e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -261,7 +261,6 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } - // TODO# movetask is created here; handle MM tables Long mmWriteId = null; Table tbl = ts.tableHandle; if (MetaStoreUtils.isMmTable(tbl.getParameters())) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index f74c0a951919..0d83abf63d8d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -6692,7 +6692,7 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // TODO# this should really get current ACID txn; assuming ACID works correctly the txn // should have been opened to create the ACID table. For now use the first ID. 
        mmWriteId = 0l;
-        tblDesc.setInitialWriteId(mmWriteId);
+        tblDesc.setInitialMmWriteId(mmWriteId);
       }
     } else if (viewDesc != null) {
       field_schemas = new ArrayList<FieldSchema>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index e1779250ba1b..d09e40108af0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -317,7 +317,7 @@ private void setLoadFileLocation(
     if (pCtx.getQueryProperties().isCTAS()) {
       CreateTableDesc ctd = pCtx.getCreateTable();
       dataSinkForCtas = ctd.getAndUnsetWriter();
-      mmWriteIdForCtas = ctd.getInitialWriteId();
+      mmWriteIdForCtas = ctd.getInitialMmWriteId();
       loc = ctd.getLocation();
     } else {
       loc = pCtx.getCreateViewDesc().getLocation();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
index 9a4e782af89f..2e484baaf976 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java
@@ -33,6 +33,7 @@ public class CopyWork implements Serializable {
   private Path fromPath;
   private Path toPath;
   private boolean errorOnSrcEmpty;
+  private boolean isMm = false;
 
   public CopyWork() {
   }
@@ -65,4 +66,12 @@ public boolean isErrorOnSrcEmpty() {
     return errorOnSrcEmpty;
   }
 
+  public void setIsSourceMm(boolean isMm) {
+    this.isMm = isMm;
+  }
+
+  public boolean isSourceMm() {
+    return isMm;
+  }
+
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 760906810e18..4b452b6ca249 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -94,7 +94,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
   private boolean isCTAS = false;
   List<SQLPrimaryKey> primaryKeys;
   List<SQLForeignKey> foreignKeys;
-  private Long initialWriteId;
+  private Long initialMmWriteId; // Initial MM write ID for CTAS and import.
   // The FSOP configuration for the FSOP that is going to write initial data during ctas.
   // This is not needed beyond compilation, so it is transient.
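// An aside on the transient writer field declared just below: the TaskCompiler hunk above is
// its intended consumer, and the hand-off is deliberately one-shot so the FileSinkDesc cannot
// be reused after compilation. A usage sketch, assuming 'ctd' is the CreateTableDesc of the
// CTAS target (the accessor itself is defined in the CreateTableDesc hunk that follows):
FileSinkDesc dataSinkForCtas = ctd.getAndUnsetWriter(); // first call returns the desc
assert ctd.getAndUnsetWriter() == null; // later calls return null; the field was cleared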
  private transient FileSinkDesc writer;
@@ -829,14 +829,16 @@ public Table toTable(HiveConf conf) throws HiveException {
     return tbl;
   }
 
-  public void setInitialWriteId(Long mmWriteId) {
-    this.initialWriteId = mmWriteId;
+  public void setInitialMmWriteId(Long mmWriteId) {
+    this.initialMmWriteId = mmWriteId;
   }
 
-  public Long getInitialWriteId() {
-    return initialWriteId;
+  public Long getInitialMmWriteId() {
+    return initialMmWriteId;
   }
+
+
   public FileSinkDesc getAndUnsetWriter() {
     FileSinkDesc fsd = writer;
     writer = null;
@@ -846,6 +848,4 @@ public FileSinkDesc getAndUnsetWriter() {
   public void setWriter(FileSinkDesc writer) {
     this.writer = writer;
   }
-
-
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index bf858b6cfd54..1b7d325066a3 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -47,6 +47,7 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc
   // TODO: the below seems like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
   private Map<String, String> partitionSpec; // NOTE: this partitionSpec has to be ordered map
+  private boolean commitMmWriteId = true;
 
   private LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
@@ -88,9 +89,8 @@ public LoadTableDesc(final Path sourcePath,
    */
   public LoadTableDesc(final Path sourcePath,
       final org.apache.hadoop.hive.ql.plan.TableDesc table,
-      final Map<String, String> partitionSpec) {
-    // TODO# we assume mm=false here
-    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, null);
+      final Map<String, String> partitionSpec, Long mmWriteId) {
+    this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID, mmWriteId);
   }
 
   public LoadTableDesc(final Path sourcePath,
@@ -189,4 +189,12 @@ public AcidUtils.Operation getWriteType() {
   public Long getMmWriteId() {
     return mmWriteId;
   }
+
+  public void setIntermediateInMmWrite(boolean b) {
+    this.commitMmWriteId = !b;
+  }
+
+  public boolean isCommitMmWrite() {
+    return commitMmWriteId;
+  }
 }
diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q
index 5377568461b5..6639aaa5c546 100644
--- a/ql/src/test/queries/clientpositive/mm_all.q
+++ b/ql/src/test/queries/clientpositive/mm_all.q
@@ -270,38 +270,93 @@ drop table load2_mm;
 drop table intermediate2;
 
--- IMPORT
-
-
--- TODO# future
---
---create table exim_department ( dep_id int) stored as textfile;
---dfs -rmr target/tmp/ql/test/data/exports/exim_department;
---export table exim_department to 'ql/test/data/exports/exim_department';
---drop table exim_department;
---create database importer;
---use importer;
---create table exim_department ( dep_id int) stored as textfile;
---import from 'ql/test/data/exports/exim_department';
---
---
---create table exim_department ( dep_id int) stored as textfile;
---load data local inpath "../../data/files/test.dat" into table exim_department;
---dfs ${system:test.dfs.mkdir} target/tmp/ql/test/data/exports/exim_department/test;
---dfs -rmr target/tmp/ql/test/data/exports/exim_department;
---export table exim_department to 'ql/test/data/exports/exim_department';
---drop table exim_department;
---
---create database importer;
---use importer;
---
---set hive.security.authorization.enabled=true;
---import from 'ql/test/data/exports/exim_department';
-
-
-
--- TODO multi-insert, truncate
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+create table intermediate_nonpart(key int, p int);
+insert into intermediate_nonpart select * from intermediate;
+create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true');
+insert into intermmediate_nonpart select * from intermediate;
+create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true');
+insert into table intermmediate partition(p) select key, p from intermediate;
+
+set hive.exim.test.mode=true;
+
+export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
+export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
+export table intermediate to 'ql/test/data/exports/intermediate_part';
+export table intermmediate to 'ql/test/data/exports/intermmediate_part';
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+
+-- non-MM export to MM table, with and without partitions
+
+drop table import0_mm;
+create table import0_mm(key int, p int) tblproperties('hivecommit'='true');
+import table import0_mm from 'ql/test/data/exports/intermediate_nonpart';
+select * from import0_mm order by key, p;
+drop table import0_mm;
+
+
+
+drop table import1_mm;
+create table import1_mm(key int) partitioned by (p int)
+  stored as orc tblproperties('hivecommit'='true');
+import table import1_mm from 'ql/test/data/exports/intermediate_part';
+select * from import1_mm order by key, p;
+drop table import1_mm;
+
+
+-- MM export into new MM table, non-part and part
+
+drop table import2_mm;
+import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart';
+desc import2_mm;
+select * from import2_mm order by key, p;
+drop table import2_mm;
+
+drop table import3_mm;
+import table import3_mm from 'ql/test/data/exports/intermmediate_part';
+desc import3_mm;
+select * from import3_mm order by key, p;
+drop table import3_mm;
+
+-- MM export into existing MM table, non-part and partial part
+
+drop table import4_mm;
+create table import4_mm(key int, p int) tblproperties('hivecommit'='true');
+import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import4_mm order by key, p;
+drop table import4_mm;
+
+drop table import5_mm;
+create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true');
+import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part';
+select * from import5_mm order by key, p;
+drop table import5_mm;
+
+-- MM export into existing non-MM table, non-part and part
+
+drop table import6_mm;
+create table import6_mm(key int, p int);
+import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart';
+select * from import6_mm order by key, p;
+drop table import6_mm;
+
+drop table import7_mm;
+create table import7_mm(key int) partitioned by (p int);
+import table import7_mm from 'ql/test/data/exports/intermmediate_part';
+select * from import7_mm order by key, p;
+drop table import7_mm;
+
+
+
+set hive.exim.test.mode=false;
+
+
+-- TODO# multi-insert, truncate
diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q
index 391017becafd..ab28d35ed53d 100644
--- a/ql/src/test/queries/clientpositive/mm_current.q
+++ b/ql/src/test/queries/clientpositive/mm_current.q
@@ -11,53 +11,42 @@ create table intermediate(key int) partitioned by (p int) stored as orc;
 insert into table intermediate partition(p='455')
 select distinct key from src where key >= 0 order by key desc limit 2;
 insert into table intermediate partition(p='456')
 select distinct key from src where key is not null order by key asc limit 2;
-
-
-drop table load0_mm;
-create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true');
-load data local inpath '../../data/files/kv1.txt' into table load0_mm;
-select count(1) from load0_mm;
-load data local inpath '../../data/files/kv2.txt' into table load0_mm;
-select count(1) from load0_mm;
-load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm;
-select count(1) from load0_mm;
-drop table load0_mm;
-
-
-drop table intermediate2;
-create table intermediate2 (key string, value string) stored as textfile
-location 'file:${system:test.tmp.dir}/intermediate2';
-load data local inpath '../../data/files/kv1.txt' into table intermediate2;
-load data local inpath '../../data/files/kv2.txt' into table intermediate2;
-load data local inpath '../../data/files/kv3.txt' into table intermediate2;
-
-drop table load1_mm;
-create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true');
-load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm;
-load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm;
-select count(1) from load1_mm;
-load data local inpath '../../data/files/kv1.txt' into table intermediate2;
-load data local inpath '../../data/files/kv2.txt' into table intermediate2;
-load data local inpath '../../data/files/kv3.txt' into table intermediate2;
-load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' overwrite into table load1_mm;
-select count(1) from load1_mm;
-load data local inpath '../../data/files/kv2.txt' into table intermediate2;
-load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' overwrite into table load1_mm;
-select count(1) from load1_mm;
-drop table load1_mm;
-
-drop table load2_mm;
-create table load2_mm (key string, value string)
-  partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true');
-load data local inpath '../../data/files/kv1.txt' into table intermediate2;
-load data local inpath '../../data/files/kv2.txt' into table intermediate2;
-load data local inpath '../../data/files/kv3.txt' into table intermediate2;
-load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' into table load2_mm partition(k=5, l=5);
-select count(1) from load2_mm;
-drop table load2_mm;
-drop table intermediate2;
-
-
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart;
+
+
+create table intermediate_nonpart(key int, p int);
+insert into intermediate_nonpart select * from intermediate;
+create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true');
+insert into intermmediate_nonpart select * from intermediate;
+create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true');
+insert into table intermmediate partition(p) select key, p from intermediate;
+
+set hive.exim.test.mode=true;
+
+export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart';
+export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart';
+export table intermediate to 'ql/test/data/exports/intermediate_part';
+export table intermmediate to 'ql/test/data/exports/intermmediate_part';
+
+drop table intermediate_nonpart;
+drop table intermmediate_part;
+drop table intermmediate_nonpart; + +-- MM export into existing non-MM table, non-part and part + +drop table import6_mm; +create table import6_mm(key int, p int); +import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'; +select * from import6_mm order by key, p; +drop table import6_mm; + +drop table import7_mm; +create table import7_mm(key int) partitioned by (p int); +import table import7_mm from 'ql/test/data/exports/intermmediate_part'; +select * from import7_mm order by key, p; +drop table import7_mm; drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 656936971d62..7d770479f3b9 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -1924,37 +1924,541 @@ POSTHOOK: query: drop table intermediate2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@intermediate2 POSTHOOK: Output: default@intermediate2 -PREHOOK: query: -- IMPORT +PREHOOK: query: drop table intermediate_nonpart +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate_nonpart +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate_nonpart(key int, p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: create table intermediate_nonpart(key int, p int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: insert into intermediate_nonpart select * from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: insert into intermediate_nonpart select * from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@intermediate_nonpart +POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: insert into intermmediate_nonpart select * from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: insert into intermmediate_nonpart select * 
from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@intermmediate_nonpart +POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate +POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate +PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermmediate +POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@intermmediate@p=455 +POSTHOOK: Output: default@intermmediate@p=456 +POSTHOOK: Output: default@intermmediate@p=457 +POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +#### A masked pattern was here #### +PREHOOK: query: export table 
intermmediate to 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate@p=455 +PREHOOK: Input: default@intermmediate@p=456 +PREHOOK: Input: default@intermmediate@p=457 +#### A masked pattern was here #### +POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate@p=455 +POSTHOOK: Input: default@intermmediate@p=456 +POSTHOOK: Input: default@intermmediate@p=457 +#### A masked pattern was here #### +PREHOOK: query: drop table intermediate_nonpart +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate_nonpart +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: drop table intermediate_nonpart +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate_nonpart +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: drop table intermmediate_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermmediate_nonpart +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermmediate_nonpart +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: -- non-MM export to MM table, with and without partitions +drop table import0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- non-MM export to MM table, with and without partitions +drop table import0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import0_mm +POSTHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import0_mm +PREHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import0_mm +POSTHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import0_mm +PREHOOK: query: select * from import0_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import0_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import0_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import0_mm +PREHOOK: Output: default@import0_mm +POSTHOOK: query: drop table import0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import0_mm +POSTHOOK: Output: default@import0_mm +PREHOOK: query: drop table import1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import1_mm(key int) partitioned by (p int) + stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import1_mm +POSTHOOK: query: create table import1_mm(key int) partitioned by (p int) + stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: 
Output: database:default +POSTHOOK: Output: default@import1_mm +PREHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import1_mm +POSTHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import1_mm +POSTHOOK: Output: default@import1_mm@p=455 +POSTHOOK: Output: default@import1_mm@p=456 +POSTHOOK: Output: default@import1_mm@p=457 +PREHOOK: query: select * from import1_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import1_mm +PREHOOK: Input: default@import1_mm@p=455 +PREHOOK: Input: default@import1_mm@p=456 +PREHOOK: Input: default@import1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from import1_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import1_mm +POSTHOOK: Input: default@import1_mm@p=455 +POSTHOOK: Input: default@import1_mm@p=456 +POSTHOOK: Input: default@import1_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import1_mm +PREHOOK: Output: default@import1_mm +POSTHOOK: query: drop table import1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import1_mm +POSTHOOK: Output: default@import1_mm +PREHOOK: query: -- MM export into new MM table, non-part and part + +drop table import2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into new MM table, non-part and part + +drop table import2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: database:default +POSTHOOK: query: import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import2_mm +PREHOOK: query: desc import2_mm +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@import2_mm +POSTHOOK: query: desc import2_mm +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@import2_mm +key int +p int +PREHOOK: query: select * from import2_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import2_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import2_mm +PREHOOK: Output: default@import2_mm +POSTHOOK: query: drop table import2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import2_mm +POSTHOOK: Output: default@import2_mm +PREHOOK: query: drop table import3_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import3_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: import table import3_mm from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: database:default +POSTHOOK: query: import table import3_mm from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import3_mm +POSTHOOK: Output: default@import3_mm@p=455 +POSTHOOK: 
Output: default@import3_mm@p=456 +POSTHOOK: Output: default@import3_mm@p=457 +PREHOOK: query: desc import3_mm +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@import3_mm +POSTHOOK: query: desc import3_mm +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@import3_mm +key int +p int + +# Partition Information +# col_name data_type comment + +p int +PREHOOK: query: select * from import3_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import3_mm +PREHOOK: Input: default@import3_mm@p=455 +PREHOOK: Input: default@import3_mm@p=456 +PREHOOK: Input: default@import3_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from import3_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import3_mm +POSTHOOK: Input: default@import3_mm@p=455 +POSTHOOK: Input: default@import3_mm@p=456 +POSTHOOK: Input: default@import3_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import3_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import3_mm +PREHOOK: Output: default@import3_mm +POSTHOOK: query: drop table import3_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import3_mm +POSTHOOK: Output: default@import3_mm +PREHOOK: query: -- MM export into existing MM table, non-part and partial part --- TODO# future --- ---create table exim_department ( dep_id int) stored as textfile; -#### A masked pattern was here #### ---export table exim_department to 'ql/test/data/exports/exim_department'; ---drop table exim_department; ---create database importer; ---use importer; ---create table exim_department ( dep_id int) stored as textfile; ---import from 'ql/test/data/exports/exim_department'; --- --- ---create table exim_department ( dep_id int) stored as textfile; ---load data local inpath "../../data/files/test.dat" into table exim_department; -#### A masked pattern was here #### ---export table exim_department to 'ql/test/data/exports/exim_department'; ---drop table exim_department; --- ---create database importer; ---use importer; --- ---set hive.security.authorization.enabled=true; ---import from 'ql/test/data/exports/exim_department'; +drop table import4_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into existing MM table, non-part and partial part +drop table import4_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import4_mm +POSTHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import4_mm +PREHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import4_mm +POSTHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import4_mm +PREHOOK: query: select * from import4_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import4_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import4_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import4_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import4_mm +PREHOOK: type: DROPTABLE 
+PREHOOK: Input: default@import4_mm +PREHOOK: Output: default@import4_mm +POSTHOOK: query: drop table import4_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import4_mm +POSTHOOK: Output: default@import4_mm +PREHOOK: query: drop table import5_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import5_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import5_mm +POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import5_mm +PREHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import5_mm +POSTHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import5_mm +POSTHOOK: Output: default@import5_mm@p=455 +PREHOOK: query: select * from import5_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import5_mm +PREHOOK: Input: default@import5_mm@p=455 +#### A masked pattern was here #### +POSTHOOK: query: select * from import5_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import5_mm +POSTHOOK: Input: default@import5_mm@p=455 +#### A masked pattern was here #### +97 455 +98 455 +PREHOOK: query: drop table import5_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import5_mm +PREHOOK: Output: default@import5_mm +POSTHOOK: query: drop table import5_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import5_mm +POSTHOOK: Output: default@import5_mm +PREHOOK: query: -- MM export into existing non-MM table, non-part and part +drop table import6_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into existing non-MM table, non-part and part --- TODO multi-insert, truncate +drop table import6_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import6_mm(key int, p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import6_mm +POSTHOOK: query: create table import6_mm(key int, p int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import6_mm +PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import6_mm +POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import6_mm +PREHOOK: query: select * from import6_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import6_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import6_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import6_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import6_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import6_mm +PREHOOK: Output: default@import6_mm +POSTHOOK: query: drop table import6_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import6_mm +POSTHOOK: Output: default@import6_mm +PREHOOK: query: drop table 
import7_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import7_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import7_mm(key int) partitioned by (p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import7_mm +POSTHOOK: query: create table import7_mm(key int) partitioned by (p int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import7_mm +PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import7_mm +POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import7_mm +POSTHOOK: Output: default@import7_mm@p=455 +POSTHOOK: Output: default@import7_mm@p=456 +POSTHOOK: Output: default@import7_mm@p=457 +PREHOOK: query: select * from import7_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import7_mm +PREHOOK: Input: default@import7_mm@p=455 +PREHOOK: Input: default@import7_mm@p=456 +PREHOOK: Input: default@import7_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from import7_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import7_mm +POSTHOOK: Input: default@import7_mm@p=455 +POSTHOOK: Input: default@import7_mm@p=456 +POSTHOOK: Input: default@import7_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import7_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import7_mm +PREHOOK: Output: default@import7_mm +POSTHOOK: query: drop table import7_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import7_mm +POSTHOOK: Output: default@import7_mm +PREHOOK: query: -- TODO# multi-insert, truncate @@ -1962,37 +2466,7 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- IMPORT - - - --- TODO# future --- ---create table exim_department ( dep_id int) stored as textfile; -#### A masked pattern was here #### ---export table exim_department to 'ql/test/data/exports/exim_department'; ---drop table exim_department; ---create database importer; ---use importer; ---create table exim_department ( dep_id int) stored as textfile; ---import from 'ql/test/data/exports/exim_department'; --- --- ---create table exim_department ( dep_id int) stored as textfile; ---load data local inpath "../../data/files/test.dat" into table exim_department; -#### A masked pattern was here #### ---export table exim_department to 'ql/test/data/exports/exim_department'; ---drop table exim_department; --- ---create database importer; ---use importer; --- ---set hive.security.authorization.enabled=true; ---import from 'ql/test/data/exports/exim_department'; - - - --- TODO multi-insert, truncate +POSTHOOK: query: -- TODO# multi-insert, truncate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 7ccc2ee4b66c..98f37a19ddd7 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,302 +28,231 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, 
comment:default), ] -PREHOOK: query: drop table load0_mm +PREHOOK: query: drop table intermediate_nonpart PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table load0_mm +POSTHOOK: query: drop table intermediate_nonpart POSTHOOK: type: DROPTABLE -PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: query: drop table intermmediate_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate_nonpart(key int, p int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@load0_mm -POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: create table intermediate_nonpart(key int, p int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@load0_mm -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load0_mm -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load0_mm -PREHOOK: query: select count(1) from load0_mm +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: insert into intermediate_nonpart select * from intermediate PREHOOK: type: QUERY -PREHOOK: Input: default@load0_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load0_mm +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: insert into intermediate_nonpart select * from intermediate POSTHOOK: type: QUERY -POSTHOOK: Input: default@load0_mm -#### A masked pattern was here #### -500 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load0_mm -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load0_mm -PREHOOK: query: select count(1) from load0_mm +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@intermediate_nonpart +POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: insert into intermmediate_nonpart select * from intermediate PREHOOK: 
type: QUERY -PREHOOK: Input: default@load0_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load0_mm +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate POSTHOOK: type: QUERY -POSTHOOK: Input: default@load0_mm -#### A masked pattern was here #### -1000 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load0_mm -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load0_mm -PREHOOK: query: select count(1) from load0_mm +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@intermmediate_nonpart +POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate +POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate +PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate PREHOOK: type: QUERY -PREHOOK: Input: default@load0_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load0_mm +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@intermmediate +POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate POSTHOOK: type: QUERY -POSTHOOK: Input: default@load0_mm -#### A masked pattern was here #### -500 -PREHOOK: query: drop table load0_mm +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@intermmediate@p=455 +POSTHOOK: Output: default@intermmediate@p=456 +POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export 
table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +#### A masked pattern was here #### +PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate@p=455 +PREHOOK: Input: default@intermmediate@p=456 +#### A masked pattern was here #### +POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate@p=455 +POSTHOOK: Input: default@intermmediate@p=456 +#### A masked pattern was here #### +PREHOOK: query: drop table intermediate_nonpart PREHOOK: type: DROPTABLE -PREHOOK: Input: default@load0_mm -PREHOOK: Output: default@load0_mm -POSTHOOK: query: drop table load0_mm +PREHOOK: Input: default@intermediate_nonpart +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: drop table intermediate_nonpart POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@load0_mm -POSTHOOK: Output: default@load0_mm -PREHOOK: query: drop table intermediate2 +POSTHOOK: Input: default@intermediate_nonpart +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: drop table intermmediate_part PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermediate2 +POSTHOOK: query: drop table intermmediate_part POSTHOOK: type: DROPTABLE -PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile -#### A masked pattern was here #### -PREHOOK: type: CREATETABLE -#### A masked pattern was here #### -PREHOOK: Output: database:default -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile -#### A masked pattern was here #### -POSTHOOK: type: CREATETABLE -#### A masked pattern was here #### -POSTHOOK: Output: database:default -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: 
default@intermediate2 -PREHOOK: query: drop table load1_mm +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermmediate_nonpart +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermmediate_nonpart +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: -- MM export into existing non-MM table, non-part and part + +drop table import6_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table load1_mm +POSTHOOK: query: -- MM export into existing non-MM table, non-part and part + +drop table import6_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: query: create table import6_mm(key int, p int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@load1_mm -POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: Output: default@import6_mm +POSTHOOK: query: create table import6_mm(key int, p int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@load1_mm -#### A masked pattern was here #### -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load1_mm -#### A masked pattern was here #### -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load1_mm -PREHOOK: query: select count(1) from load1_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load1_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@load1_mm -#### A masked pattern was here #### -1000 -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -#### A masked pattern was here #### -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load1_mm -PREHOOK: query: select count(1) from 
load1_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load1_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@load1_mm -#### A masked pattern was here #### -1050 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -#### A masked pattern was here #### -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@load1_mm -PREHOOK: query: select count(1) from load1_mm -PREHOOK: type: QUERY -PREHOOK: Input: default@load1_mm -#### A masked pattern was here #### -POSTHOOK: query: select count(1) from load1_mm -POSTHOOK: type: QUERY -POSTHOOK: Input: default@load1_mm -#### A masked pattern was here #### -500 -PREHOOK: query: drop table load1_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@load1_mm -PREHOOK: Output: default@load1_mm -POSTHOOK: query: drop table load1_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@load1_mm -POSTHOOK: Output: default@load1_mm -PREHOOK: query: drop table load2_mm +POSTHOOK: Output: default@import6_mm +PREHOOK: query: drop table import7_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table load2_mm +POSTHOOK: query: drop table import7_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table load2_mm (key string, value string) - partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: query: create table import7_mm(key int) partitioned by (p int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@load2_mm -POSTHOOK: query: create table load2_mm (key string, value string) - partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: Output: default@import7_mm +POSTHOOK: query: create table import7_mm(key int) partitioned by (p int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@load2_mm -PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@intermediate2 -#### A masked pattern was 
here #### -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@load2_mm +POSTHOOK: Output: default@import7_mm +PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import6_mm +POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import6_mm +PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import7_mm +POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import7_mm +POSTHOOK: Output: default@import7_mm@p=455 +POSTHOOK: Output: default@import7_mm@p=456 +PREHOOK: query: select * from import6_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import6_mm #### A masked pattern was here #### -POSTHOOK: type: LOAD +POSTHOOK: query: select * from import6_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import6_mm #### A masked pattern was here #### -POSTHOOK: Output: default@load2_mm -POSTHOOK: Output: default@load2_mm@k=5/l=5 -PREHOOK: query: select count(1) from load2_mm +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: select * from import7_mm order by key, p PREHOOK: type: QUERY -PREHOOK: Input: default@load2_mm -PREHOOK: Input: default@load2_mm@k=5/l=5 +PREHOOK: Input: default@import7_mm +PREHOOK: Input: default@import7_mm@p=455 +PREHOOK: Input: default@import7_mm@p=456 #### A masked pattern was here #### -POSTHOOK: query: select count(1) from load2_mm +POSTHOOK: query: select * from import7_mm order by key, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@load2_mm -POSTHOOK: Input: default@load2_mm@k=5/l=5 -#### A masked pattern was here #### -1025 -PREHOOK: query: drop table load2_mm +POSTHOOK: Input: default@import7_mm +POSTHOOK: Input: default@import7_mm@p=455 +POSTHOOK: Input: default@import7_mm@p=456 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: drop table import6_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@load2_mm -PREHOOK: Output: default@load2_mm -POSTHOOK: query: drop table load2_mm +PREHOOK: Input: default@import6_mm +PREHOOK: Output: default@import6_mm +POSTHOOK: query: drop table import6_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@load2_mm -POSTHOOK: Output: default@load2_mm -PREHOOK: query: drop table intermediate2 +POSTHOOK: Input: default@import6_mm +POSTHOOK: Output: default@import6_mm +PREHOOK: query: drop table import7_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@intermediate2 -PREHOOK: Output: default@intermediate2 -POSTHOOK: query: drop table intermediate2 +PREHOOK: Input: default@import7_mm +PREHOOK: Output: default@import7_mm +POSTHOOK: query: drop table import7_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@intermediate2 -POSTHOOK: Output: default@intermediate2 +POSTHOOK: Input: default@import7_mm +POSTHOOK: Output: default@import7_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From ff1ea20cb72e7e754fca1fc00e712adec257a204 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Thu, 20 Oct 2016 15:07:39 -0700 Subject: [PATCH 17/24] HIVE-15020 : handle truncate for MM tables (not atomic yet) 
(Sergey Shelukhin) --- .../hadoop/hive/ql/exec/TaskFactory.java | 1 + .../hive/ql/parse/DDLSemanticAnalyzer.java | 9 +- .../queries/clientnegative/mm_truncate_cols.q | 3 + ql/src/test/queries/clientpositive/mm_all.q | 20 ++- .../clientnegative/mm_truncate_cols.q.out | 9 ++ .../results/clientpositive/llap/mm_all.q.out | 152 ++++++++++++------ 6 files changed, 136 insertions(+), 58 deletions(-) create mode 100644 ql/src/test/queries/clientnegative/mm_truncate_cols.q create mode 100644 ql/src/test/results/clientnegative/mm_truncate_cols.q.out diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 822ff41c2c96..1cd20e392a02 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -147,6 +147,7 @@ public static Task get(Class workClass, throw new RuntimeException("No task for work class " + workClass.getName()); } + @SafeVarargs public static Task get(T work, HiveConf conf, Task... tasklist) { Task ret = get((Class) work.getClass(), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index a018b5432f01..038cbbf8c067 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -963,6 +963,10 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { if (indexes != null && indexes.size() > 0) { throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); } + // It would be possible to support this, but the command has little practical use. + if (MetaStoreUtils.isMmTable(table.getParameters())) { + throw new SemanticException("Truncating MM table columns not presently supported"); + } List bucketCols = null; Class inputFormatClass = null; @@ -1060,12 +1064,11 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { // so the operation is atomic. Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); truncateTblDesc.setOutputDir(queryTmpdir); - // TODO# movetask is created here; handle MM tables LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, partSpec == null ?
new HashMap() : partSpec, null); ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); + @SuppressWarnings("unchecked") + Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); truncateTask.addDependentTask(moveTsk); // Recalculate the HDFS stats if auto gather stats is set diff --git a/ql/src/test/queries/clientnegative/mm_truncate_cols.q b/ql/src/test/queries/clientnegative/mm_truncate_cols.q new file mode 100644 index 000000000000..178011827fa6 --- /dev/null +++ b/ql/src/test/queries/clientnegative/mm_truncate_cols.q @@ -0,0 +1,3 @@ +CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true'); + +TRUNCATE TABLE mm_table COLUMNS (value); diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index 6639aaa5c546..d44013666253 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -21,6 +21,10 @@ insert into table part_mm partition(key_mm='455') select key from intermediate; insert into table part_mm partition(key_mm='456') select key from intermediate; insert into table part_mm partition(key_mm='455') select key from intermediate; select * from part_mm order by key, key_mm; +truncate table part_mm partition(key_mm='455'); +select * from part_mm order by key, key_mm; +truncate table part_mm; +select * from part_mm order by key, key_mm; drop table part_mm; drop table simple_mm; @@ -30,6 +34,8 @@ insert overwrite table simple_mm select key from intermediate; select * from simple_mm order by key; insert into table simple_mm select key from intermediate; select * from simple_mm order by key; +truncate table simple_mm; +select * from simple_mm; drop table simple_mm; @@ -201,9 +207,9 @@ drop table iow0_mm; create table iow0_mm(key int) tblproperties('hivecommit'='true'); insert overwrite table iow0_mm select key from intermediate; insert into table iow0_mm select key + 1 from intermediate; -select * from iow0_mm; +select * from iow0_mm order by key; insert overwrite table iow0_mm select key + 2 from intermediate; -select * from iow0_mm; +select * from iow0_mm order by key; drop table iow0_mm; @@ -213,13 +219,13 @@ insert overwrite table iow1_mm partition (key2) select key as k1, key from intermediate union all select key as k1, key from intermediate; insert into table iow1_mm partition (key2) select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate; -select * from iow1_mm; +select * from iow1_mm order by key, key2; insert overwrite table iow1_mm partition (key2) select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate; -select * from iow1_mm; +select * from iow1_mm order by key, key2; insert overwrite table iow1_mm partition (key2) select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate; -select * from iow1_mm; +select * from iow1_mm order by key, key2; drop table iow1_mm; @@ -351,12 +357,10 @@ import table import7_mm from 'ql/test/data/exports/intermmediate_part'; select * from import7_mm order by key, p; drop table import7_mm; - - set hive.exim.test.mode=false; --- TODO# multi-insert, truncate +-- TODO# multi-insert diff --git a/ql/src/test/results/clientnegative/mm_truncate_cols.q.out b/ql/src/test/results/clientnegative/mm_truncate_cols.q.out new file mode 100644 index 000000000000..015f251c8fc9 --- /dev/null +++ 
b/ql/src/test/results/clientnegative/mm_truncate_cols.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mm_table +POSTHOOK: query: CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mm_table +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Truncating MM table columns not presently supported diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index 7d770479f3b9..bdd365d2df7d 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -180,6 +180,50 @@ POSTHOOK: Input: default@part_mm@key_mm=456 103 455 103 455 103 456 +PREHOOK: query: truncate table part_mm partition(key_mm='455') +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: truncate table part_mm partition(key_mm='455') +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@part_mm@key_mm=455 +PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +0 456 +10 456 +97 456 +98 456 +100 456 +103 456 +PREHOOK: query: truncate table part_mm +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@part_mm@key_mm=455 +PREHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: query: truncate table part_mm +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Output: default@part_mm@key_mm=456 +PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### PREHOOK: query: drop table part_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@part_mm @@ -279,6 +323,20 @@ POSTHOOK: Input: default@simple_mm 100 103 103 +PREHOOK: query: truncate table simple_mm +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@simple_mm +POSTHOOK: query: truncate table simple_mm +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@simple_mm +PREHOOK: query: select * from simple_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### PREHOOK: query: drop table simple_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@simple_mm @@ -1299,26 +1357,26 @@ POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@iow0_mm POSTHOOK: Lineage: iow0_mm.key EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from iow0_mm +PREHOOK: query: select * from iow0_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@iow0_mm #### A masked pattern was here #### -POSTHOOK: query: select * from iow0_mm +POSTHOOK: query: select * from iow0_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@iow0_mm #### A masked pattern was here #### -98 -97 -100 -103 0 +1 10 -99 +11 +97 98 +98 +99 +100 101 +103 104 -1 -11 PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate PREHOOK: type: QUERY PREHOOK: Input: default@intermediate @@ -1334,20 +1392,20 @@ POSTHOOK: Input: default@intermediate@p=456 POSTHOOK: Input: default@intermediate@p=457 POSTHOOK: Output: default@iow0_mm POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from iow0_mm +PREHOOK: query: select * from iow0_mm order by key PREHOOK: type: QUERY PREHOOK: Input: default@iow0_mm #### A masked pattern was here #### -POSTHOOK: query: select * from iow0_mm +POSTHOOK: query: select * from iow0_mm order by key POSTHOOK: type: QUERY POSTHOOK: Input: default@iow0_mm #### A masked pattern was here #### -100 +2 +12 99 +100 102 105 -2 -12 PREHOOK: query: drop table iow0_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@iow0_mm @@ -1422,7 +1480,7 @@ POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)int POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from iow1_mm +PREHOOK: query: select * from iow1_mm order by key, key2 PREHOOK: type: QUERY PREHOOK: Input: default@iow1_mm PREHOOK: Input: default@iow1_mm@key2=0 @@ -1432,7 +1490,7 @@ PREHOOK: Input: default@iow1_mm@key2=103 PREHOOK: Input: default@iow1_mm@key2=97 PREHOOK: Input: default@iow1_mm@key2=98 #### A masked pattern was here #### -POSTHOOK: query: select * from iow1_mm +POSTHOOK: query: select * from iow1_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@iow1_mm POSTHOOK: Input: default@iow1_mm@key2=0 @@ -1444,28 +1502,28 @@ POSTHOOK: Input: default@iow1_mm@key2=98 #### A masked pattern was here #### 0 0 0 0 -1 0 0 0 +1 0 10 10 10 10 -11 10 10 10 -100 100 -100 100 -101 100 -100 100 -103 103 -103 103 -104 103 -103 103 +11 10 97 97 97 97 -98 97 97 97 +98 97 98 98 98 98 -99 98 98 98 +99 98 +100 100 +100 100 +100 100 +101 100 +103 103 +103 103 +103 103 +104 103 PREHOOK: query: insert overwrite table iow1_mm partition (key2) select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate PREHOOK: type: QUERY @@ -1493,7 +1551,7 @@ POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)int POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] 
-PREHOOK: query: select * from iow1_mm +PREHOOK: query: select * from iow1_mm order by key, key2 PREHOOK: type: QUERY PREHOOK: Input: default@iow1_mm PREHOOK: Input: default@iow1_mm@key2=0 @@ -1503,7 +1561,7 @@ PREHOOK: Input: default@iow1_mm@key2=103 PREHOOK: Input: default@iow1_mm@key2=97 PREHOOK: Input: default@iow1_mm@key2=98 #### A masked pattern was here #### -POSTHOOK: query: select * from iow1_mm +POSTHOOK: query: select * from iow1_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@iow1_mm POSTHOOK: Input: default@iow1_mm@key2=0 @@ -1517,14 +1575,14 @@ POSTHOOK: Input: default@iow1_mm@key2=98 4 0 13 10 14 10 -103 100 -104 100 -106 103 -107 103 100 97 101 97 101 98 102 98 +103 100 +104 100 +106 103 +107 103 PREHOOK: query: insert overwrite table iow1_mm partition (key2) select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate PREHOOK: type: QUERY @@ -1562,7 +1620,7 @@ POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)inte POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: select * from iow1_mm +PREHOOK: query: select * from iow1_mm order by key, key2 PREHOOK: type: QUERY PREHOOK: Input: default@iow1_mm PREHOOK: Input: default@iow1_mm@key2=0 @@ -1581,7 +1639,7 @@ PREHOOK: Input: default@iow1_mm@key2=97 PREHOOK: Input: default@iow1_mm@key2=98 PREHOOK: Input: default@iow1_mm@key2=99 #### A masked pattern was here #### -POSTHOOK: query: select * from iow1_mm +POSTHOOK: query: select * from iow1_mm order by key, key2 POSTHOOK: type: QUERY POSTHOOK: Input: default@iow1_mm POSTHOOK: Input: default@iow1_mm@key2=0 @@ -1600,26 +1658,26 @@ POSTHOOK: Input: default@iow1_mm@key2=97 POSTHOOK: Input: default@iow1_mm@key2=98 POSTHOOK: Input: default@iow1_mm@key2=99 #### A masked pattern was here #### +2 2 3 0 +3 3 4 0 +12 12 13 10 +13 13 14 10 +99 99 +100 97 100 100 100 100 +101 97 +101 98 101 101 +102 98 102 102 103 103 105 105 106 106 -12 12 -13 13 -2 2 -3 3 -100 97 -101 97 -101 98 -102 98 -99 99 PREHOOK: query: drop table iow1_mm PREHOOK: type: DROPTABLE PREHOOK: Input: default@iow1_mm @@ -2458,7 +2516,7 @@ POSTHOOK: query: drop table import7_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@import7_mm POSTHOOK: Output: default@import7_mm -PREHOOK: query: -- TODO# multi-insert, truncate +PREHOOK: query: -- TODO# multi-insert @@ -2466,7 +2524,7 @@ drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO# multi-insert, truncate +POSTHOOK: query: -- TODO# multi-insert From b60bbc28afc3e683e8e146a6ef9999fd5467b2ca Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Thu, 20 Oct 2016 15:08:48 -0700 Subject: [PATCH 18/24] HIVE-15021 : handle (or add a test for) multi-insert into MM tables (Sergey Shelukhin) --- ql/src/test/queries/clientpositive/mm_all.q | 57 +- .../test/queries/clientpositive/mm_current.q | 89 ++-- .../results/clientpositive/llap/mm_all.q.out | 463 +++++++++++++++- .../clientpositive/llap/mm_current.q.out | 495 ++++++++++++------ 4 files changed, 888 insertions(+), 216 deletions(-) diff --git 
a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q index d44013666253..9d1bf8a9e96a 100644 --- a/ql/src/test/queries/clientpositive/mm_all.q +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -360,7 +360,62 @@ drop table import7_mm; set hive.exim.test.mode=false; --- TODO# multi-insert + +drop table multi0_1_mm; +drop table multi0_2_mm; +create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true'); +create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true'); + +from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key; + +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=true; +set hive.merge.sparkfiles=true; +set hive.merge.tezfiles=true; + +from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p; +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +drop table multi0_1_mm; +drop table multi0_2_mm; + + +drop table multi1_mm; +create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true'); +from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p; +select * from multi1_mm order by key, key2, p; +from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p; +select * from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; +drop table multi1_mm; + + + + diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q index ab28d35ed53d..bb166cfe1367 100644 --- a/ql/src/test/queries/clientpositive/mm_current.q +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -11,42 +11,59 @@ create table intermediate(key int) partitioned by (p int) stored as orc; insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; -drop table intermediate_nonpart; -drop table intermmediate_part; -drop table intermmediate_nonpart; - - -create table intermediate_nonpart(key int, p int); -insert into intermediate_nonpart select * from intermediate; -create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true'); -insert into intermmediate_nonpart select * from intermediate; -create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true'); -insert into table intermmediate partition(p) select key, p from intermediate; - -set hive.exim.test.mode=true; - -export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'; -export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'; -export table intermediate to 
'ql/test/data/exports/intermediate_part'; -export table intermmediate to 'ql/test/data/exports/intermmediate_part'; - -drop table intermediate_nonpart; -drop table intermmediate_part; -drop table intermmediate_nonpart; - --- MM export into existing non-MM table, non-part and part - -drop table import6_mm; -create table import6_mm(key int, p int); -import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'; -select * from import6_mm order by key, p; -drop table import6_mm; - -drop table import7_mm; -create table import7_mm(key int) partitioned by (p int); -import table import7_mm from 'ql/test/data/exports/intermmediate_part'; -select * from import7_mm order by key, p; -drop table import7_mm; + +drop table multi0_1_mm; +drop table multi0_2_mm; +create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true'); +create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true'); + +from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key; + +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=true; +set hive.merge.sparkfiles=true; +set hive.merge.tezfiles=true; + +from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p; +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +drop table multi0_1_mm; +drop table multi0_2_mm; + + +drop table multi1_mm; +create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true'); +from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p; +select * from multi1_mm order by key, key2, p; +from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p; +select * from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; +drop table multi1_mm; + drop table intermediate; diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out index bdd365d2df7d..57c878ca0816 100644 --- a/ql/src/test/results/clientpositive/llap/mm_all.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -2516,19 +2516,462 @@ POSTHOOK: query: drop table import7_mm POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@import7_mm POSTHOOK: Output: default@import7_mm -PREHOOK: query: -- TODO# multi-insert - - - -drop table intermediate +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: 
query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 +PREHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: 
Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_1_mm +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi0_1_mm +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_2_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi0_2_mm +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: 
multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 2 +10 456 2 +97 455 2 +98 455 2 +100 457 2 +103 457 2 +455 97 1 +455 98 1 +456 0 1 +456 10 1 +457 100 1 +457 103 1 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 1 +0 456 2 +10 456 1 +10 456 2 +97 455 1 +97 455 2 +98 455 1 +98 455 2 +100 457 1 +100 457 2 +103 457 1 +103 457 2 +455 97 1 +455 97 2 +455 98 1 +455 98 2 +456 0 1 +456 0 2 +456 10 1 +456 10 2 +457 100 1 +457 100 2 +457 103 1 +457 103 2 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 
+POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=455 +POSTHOOK: Output: default@multi1_mm@p=456 +POSTHOOK: Output: default@multi1_mm@p=457 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +PREHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +POSTHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 2 +100 457 1 +100 457 1 +100 457 2 +103 457 1 +103 457 1 +103 457 2 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 2 +456 10 456 +457 100 1 +457 100 2 +457 100 457 +457 103 1 +457 103 2 +457 103 457 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +PREHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +POSTHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 1 +98 455 2 +100 457 1 +100 457 1 +100 457 1 +100 457 2 +103 457 1 +103 457 1 +103 457 1 +103 457 2 +455 97 1 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 1 +456 10 2 +456 10 456 +457 100 1 +457 100 1 +457 100 2 +457 100 457 +457 103 1 +457 103 1 +457 103 2 +457 103 457 +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi1_mm +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate PREHOOK: Output: default@intermediate -POSTHOOK: query: -- TODO# multi-insert - - - -drop table intermediate +POSTHOOK: query: drop table intermediate POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@intermediate POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out index 98f37a19ddd7..0522288b31b7 100644 --- a/ql/src/test/results/clientpositive/llap/mm_current.q.out +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -28,231 +28,388 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@intermediate@p=456 POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: drop table intermediate_nonpart +PREHOOK: query: drop table multi0_1_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermediate_nonpart +POSTHOOK: query: drop table multi0_1_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table intermmediate_part +PREHOOK: query: drop table multi0_2_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermmediate_part +POSTHOOK: query: drop table multi0_2_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table intermmediate_nonpart -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermmediate_nonpart -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table intermediate_nonpart(key int, p int) +PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@intermediate_nonpart -POSTHOOK: query: create table 
intermediate_nonpart(key int, p int) +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@intermediate_nonpart -PREHOOK: query: insert into intermediate_nonpart select * from intermediate -PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@intermediate_nonpart -POSTHOOK: query: insert into intermediate_nonpart select * from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@intermediate_nonpart -POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] -PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@intermmediate_nonpart -POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@intermmediate_nonpart -PREHOOK: query: insert into intermmediate_nonpart select * from intermediate +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key PREHOOK: type: QUERY PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@intermmediate_nonpart -POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key POSTHOOK: type: QUERY POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@intermmediate_nonpart -POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] -PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@intermmediate -POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@intermmediate -PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: 
Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 PREHOOK: type: QUERY -PREHOOK: Input: default@intermediate -PREHOOK: Input: default@intermediate@p=455 -PREHOOK: Input: default@intermediate@p=456 -PREHOOK: Output: default@intermmediate -POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate -POSTHOOK: type: QUERY -POSTHOOK: Input: default@intermediate -POSTHOOK: Input: default@intermediate@p=455 -POSTHOOK: Input: default@intermediate@p=456 -POSTHOOK: Output: default@intermmediate@p=455 -POSTHOOK: Output: default@intermmediate@p=456 -POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] -PREHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' -PREHOOK: type: EXPORT -PREHOOK: Input: default@intermediate_nonpart +PREHOOK: Input: default@multi0_1_mm #### A masked pattern was here #### -POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' -POSTHOOK: type: EXPORT -POSTHOOK: Input: default@intermediate_nonpart +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm #### A masked pattern was here #### -PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' -PREHOOK: type: EXPORT -PREHOOK: Input: default@intermmediate_nonpart +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### -POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' -POSTHOOK: type: EXPORT -POSTHOOK: Input: default@intermmediate_nonpart +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### -PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' -PREHOOK: type: EXPORT +455 97 +455 98 +456 0 +456 10 +PREHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate PREHOOK: Input: default@intermediate@p=455 PREHOOK: Input: default@intermediate@p=456 -#### A masked pattern was here #### -POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' -POSTHOOK: type: EXPORT +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate POSTHOOK: Input: default@intermediate@p=455 POSTHOOK: Input: 
default@intermediate@p=456 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm #### A masked pattern was here #### -PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' -PREHOOK: type: EXPORT -PREHOOK: Input: default@intermmediate@p=455 -PREHOOK: Input: default@intermmediate@p=456 +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm #### A masked pattern was here #### -POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' -POSTHOOK: type: EXPORT -POSTHOOK: Input: default@intermmediate@p=455 -POSTHOOK: Input: default@intermmediate@p=456 +0 456 +10 456 +97 455 +98 455 +455 97 +455 98 +456 0 +456 10 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm #### A masked pattern was here #### -PREHOOK: query: drop table intermediate_nonpart -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@intermediate_nonpart -PREHOOK: Output: default@intermediate_nonpart -POSTHOOK: query: drop table intermediate_nonpart -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@intermediate_nonpart -POSTHOOK: Output: default@intermediate_nonpart -PREHOOK: query: drop table intermmediate_part -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table intermmediate_part -POSTHOOK: type: DROPTABLE -PREHOOK: query: drop table intermmediate_nonpart +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: drop table multi0_1_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@intermmediate_nonpart -PREHOOK: Output: default@intermmediate_nonpart -POSTHOOK: query: drop table intermmediate_nonpart +PREHOOK: Input: default@multi0_1_mm +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: drop table multi0_1_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@intermmediate_nonpart -POSTHOOK: Output: default@intermmediate_nonpart -PREHOOK: query: -- MM export into existing non-MM table, non-part and part - -drop table import6_mm +POSTHOOK: Input: default@multi0_1_mm +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: drop table multi0_2_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: -- MM export into existing non-MM table, non-part and part - -drop table import6_mm +PREHOOK: Input: default@multi0_2_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: drop table multi0_2_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table import6_mm(key int, p int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@import6_mm -POSTHOOK: query: create table import6_mm(key int, p int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@import6_mm -PREHOOK: query: drop table import7_mm 
+POSTHOOK: Input: default@multi0_2_mm +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: drop table multi1_mm PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table import7_mm +POSTHOOK: query: drop table multi1_mm POSTHOOK: type: DROPTABLE -PREHOOK: query: create table import7_mm(key int) partitioned by (p int) +PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default -PREHOOK: Output: default@import7_mm -POSTHOOK: query: create table import7_mm(key int) partitioned by (p int) +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default -POSTHOOK: Output: default@import7_mm -PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' -PREHOOK: type: IMPORT +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 #### A masked pattern was here #### -PREHOOK: Output: default@import6_mm -POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' -POSTHOOK: type: IMPORT +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 #### A masked pattern was here #### -POSTHOOK: Output: default@import6_mm -PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' -PREHOOK: type: IMPORT +0 456 2 +10 456 2 +97 455 2 +98 455 2 +455 97 1 +455 98 1 +456 0 1 +456 10 1 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 
+POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 #### A masked pattern was here #### -PREHOOK: Output: default@import7_mm -POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' -POSTHOOK: type: IMPORT +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 #### A masked pattern was here #### -POSTHOOK: Output: default@import7_mm -POSTHOOK: Output: default@import7_mm@p=455 -POSTHOOK: Output: default@import7_mm@p=456 -PREHOOK: query: select * from import6_mm order by key, p +0 456 1 +0 456 2 +10 456 1 +10 456 2 +97 455 1 +97 455 2 +98 455 1 +98 455 2 +455 97 1 +455 97 2 +455 98 1 +455 98 2 +456 0 1 +456 0 2 +456 10 1 +456 10 2 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=455 +POSTHOOK: Output: default@multi1_mm@p=456 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order 
by key, key2, p PREHOOK: type: QUERY -PREHOOK: Input: default@import6_mm +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 #### A masked pattern was here #### -POSTHOOK: query: select * from import6_mm order by key, p +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@import6_mm +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 #### A masked pattern was here #### -0 456 -10 456 -97 455 -98 455 -PREHOOK: query: select * from import7_mm order by key, p +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 2 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 2 +456 10 456 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p PREHOOK: type: QUERY -PREHOOK: Input: default@import7_mm -PREHOOK: Input: default@import7_mm@p=455 -PREHOOK: Input: default@import7_mm@p=456 +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 #### A masked pattern was here #### -POSTHOOK: query: select * from import7_mm order by key, p +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p POSTHOOK: type: QUERY -POSTHOOK: Input: default@import7_mm -POSTHOOK: Input: default@import7_mm@p=455 -POSTHOOK: Input: default@import7_mm@p=456 +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 #### A masked pattern was here #### -0 456 -10 456 -97 455 -98 455 -PREHOOK: query: drop table import6_mm -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@import6_mm -PREHOOK: Output: default@import6_mm -POSTHOOK: query: drop table import6_mm -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@import6_mm 
-POSTHOOK: Output: default@import6_mm -PREHOOK: query: drop table import7_mm +0 456 1 +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 1 +98 455 2 +455 97 1 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 1 +456 10 2 +456 10 456 +PREHOOK: query: drop table multi1_mm PREHOOK: type: DROPTABLE -PREHOOK: Input: default@import7_mm -PREHOOK: Output: default@import7_mm -POSTHOOK: query: drop table import7_mm +PREHOOK: Input: default@multi1_mm +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: drop table multi1_mm POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@import7_mm -POSTHOOK: Output: default@import7_mm +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Output: default@multi1_mm PREHOOK: query: drop table intermediate PREHOOK: type: DROPTABLE PREHOOK: Input: default@intermediate From 65a380ddb67b3836b5c3f14ba679f5d52abbbeda Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 25 Oct 2016 14:05:36 -0700 Subject: [PATCH 19/24] HIVE-14954 : put FSOP manifests for the instances of the same vertex into a directory (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 2 +- .../hive/metastore/MmCleanerThread.java | 2 +- .../apache/hadoop/hive/ql/exec/Utilities.java | 73 ++++++++----------- .../rcfile/truncate/ColumnTruncateMapper.java | 1 - .../apache/hadoop/hive/ql/metadata/Hive.java | 1 - .../optimizer/unionproc/UnionProcFactory.java | 1 - .../hive/ql/parse/GenTezProcContext.java | 2 +- .../hadoop/hive/ql/parse/TaskCompiler.java | 1 - 8 files changed, 35 insertions(+), 48 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java index df0278cd3780..6b382472ef16 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -133,7 +133,7 @@ public IdPathFilter(long writeId, boolean isMatch) { @Override public boolean accept(Path path) { String name = path.getName(); - return isMatch == (name.equals(mmDirName) || name.startsWith(tmpPrefix)); + return isMatch == (name.equals(mmDirName) || name.equals(tmpPrefix)); } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java index 6a7f588bb8c9..d99b0d7c3acd 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java @@ -288,7 +288,7 @@ private void deleteAbortedWriteIdFiles(String location, HashSet abortedWri LOG.warn(path + " does not exist; assuming that the cleanup is not needed."); return; } - // TODO# do we need to account for any subdirectories here? decide after special-case jiras + // TODO# this doesn't account for list bucketing. Do nothing now, ACID will solve all problems. 
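+      // Illustration of the gap (layout assumed for this sketch, not taken from the patch):
+      // with list bucketing, skewed values add extra directory levels under the partition,
+      // e.g. <table>/p=1/skewedcol=x/mm_<writeId>/000000_0, so the flat listing below
+      // only sees the top-level children and would miss files one level down.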
files = fs.listStatus(path); } catch (Exception ex) { LOG.error("Failed to get files for " + path + "; cannot ensure cleanup for any writes"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 0f8384d044c6..a7050ab00cc1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -35,7 +35,6 @@ import java.io.OutputStream; import java.io.Serializable; import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; import java.net.URLClassLoader; import java.net.URLDecoder; @@ -50,7 +49,6 @@ import java.util.Arrays; import java.util.Calendar; import java.util.Collection; -import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; @@ -149,7 +147,6 @@ import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; -import org.apache.hadoop.hive.ql.plan.ListBucketingCtx; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.MergeJoinWork; @@ -213,10 +210,10 @@ * Utilities. * */ -@SuppressWarnings("nls") +@SuppressWarnings({ "nls", "deprecation" }) public final class Utilities { - // TODO: remove when merging + // TODO# remove when merging; convert some statements to local loggers, remove others public static final Logger LOG14535 = LoggerFactory.getLogger("Log14535"); /** @@ -651,8 +648,8 @@ protected Expression instantiate(Object oldInstance, Encoder out) { } @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - Iterator ite = ((Collection) oldInstance).iterator(); + protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { + Iterator ite = ((Collection) oldInstance).iterator(); while (ite.hasNext()) { out.writeStatement(new Statement(oldInstance, "add", new Object[] {ite.next()})); } @@ -3798,10 +3795,6 @@ public static String humanReadableByteCount(long bytes) { private static final String MANIFEST_EXTENSION = ".manifest"; - private static Path getManifestDir(Path specPath, String unionSuffix) { - return (unionSuffix == null) ? specPath : new Path(specPath, unionSuffix); - } - private static void tryDelete(FileSystem fs, Path path) { try { fs.delete(path, true); @@ -3837,26 +3830,20 @@ private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manif tryDelete(fs, status.getPath()); } } - files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); - if (files != null) { - for (FileStatus status : files) { - Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure"); - tryDelete(fs, status.getPath()); - } - } + Utilities.LOG14535.info("Deleting " + manifestDir + " on failure"); + fs.delete(manifestDir, true); } public static void writeMmCommitManifest(List commitPaths, Path specPath, FileSystem fs, String taskId, Long mmWriteId, String unionSuffix) throws HiveException { if (commitPaths.isEmpty()) return; - Path manifestPath = getManifestDir(specPath, unionSuffix); - manifestPath = new Path(manifestPath, "_tmp." + ValidWriteIds.getMmFilePrefix( - mmWriteId) + "_" + taskId + MANIFEST_EXTENSION); + // We assume one FSOP per task (per specPath), so we create it in specPath. 
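+    // Illustrative layout (assuming, per the mm_1/mm_10 examples elsewhere in this series,
+    // that ValidWriteIds.getMmFilePrefix(id) produces a name like "mm_<id>"):
+    //   <specPath>/_tmp.mm_17/000000_0.manifest               -- plain FSOP
+    //   <specPath>/_tmp.mm_17/<unionSuffix>/000000_0.manifest -- one subdir per union branch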
+ Path manifestPath = getManifestDir(specPath, mmWriteId, unionSuffix); + manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION); Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); try { // Don't overwrite the manifest... should fail if we have collisions. - // We assume one FSOP per task (per specPath), so we create it in specPath. try (FSDataOutputStream out = fs.create(manifestPath, false)) { if (out == null) { throw new HiveException("Failed to create manifest at " + manifestPath); @@ -3871,6 +3858,11 @@ public static void writeMmCommitManifest(List commitPaths, Path specPath, } } + private static Path getManifestDir(Path specPath, long mmWriteId, String unionSuffix) { + Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(mmWriteId)); + return (unionSuffix == null) ? manifestPath : new Path(manifestPath, unionSuffix); + } + public static final class MissingBucketsContext { public final TableDesc tableInfo; public final int numBuckets; @@ -3886,17 +3878,16 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long mmWriteId, Reporter reporter) throws IOException, HiveException { FileSystem fs = specPath.getFileSystem(hconf); - // Manifests would be at the root level, but the results at target level. - Path manifestDir = getManifestDir(specPath, unionSuffix); - - ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + Path manifestDir = getManifestDir(specPath, mmWriteId, unionSuffix); if (!success) { + ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, unionSuffix, filter, mmWriteId); return; } - FileStatus[] files = HiveStatsUtils.getFileStatusRecurse(manifestDir, 1, fs, filter); + Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")"); + FileStatus[] files = fs.listStatus(manifestDir); List manifests = new ArrayList<>(); if (files != null) { for (FileStatus status : files) { @@ -3909,6 +3900,7 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } Utilities.LOG14535.info("Looking for files in: " + specPath); + ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); files = getMmDirectoryCandidates( fs, specPath, dpLevels, lbLevels, filter, mmWriteId); ArrayList mmDirectories = new ArrayList<>(); @@ -3940,6 +3932,18 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } } + Utilities.LOG14535.info("Deleting manifest directory " + manifestDir); + tryDelete(fs, manifestDir); + if (unionSuffix != null) { + // Also delete the parent directory if we are the last union FSOP to execute. 
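+      // Sketch of what this guards (union suffix names illustrative): once the branch
+      // directories _tmp.mm_17/1 and _tmp.mm_17/2 have each been removed by their own
+      // FSOP, the listStatus below returns nothing and the shared _tmp.mm_17 parent is
+      // removed too; an earlier-finishing branch still sees a sibling and leaves it alone.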
+ manifestDir = manifestDir.getParent(); + FileStatus[] remainingFiles = fs.listStatus(manifestDir); + if (remainingFiles == null || remainingFiles.length == 0) { + Utilities.LOG14535.info("Deleting manifest directory " + manifestDir); + tryDelete(fs, manifestDir); + } + } + for (FileStatus status : mmDirectories) { cleanMmDirectory(status.getPath(), fs, unionSuffix, committed); } @@ -3947,19 +3951,6 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con if (!committed.isEmpty()) { throw new HiveException("The following files were committed but not found: " + committed); } - for (Path mfp : manifests) { - Utilities.LOG14535.info("Deleting manifest " + mfp); - tryDelete(fs, mfp); - } - // Delete the manifest directory if we only created it for manifests; otherwise the - // dynamic partition loader will find it and try to load it as a partition... what a mess. - if (manifestDir != specPath) { - FileStatus[] remainingFiles = fs.listStatus(manifestDir); - if (remainingFiles == null || remainingFiles.length == 0) { - Utilities.LOG14535.info("Deleting directory " + manifestDir); - tryDelete(fs, manifestDir); - } - } if (mmDirectories.isEmpty()) return; @@ -3984,7 +3975,7 @@ private static void cleanMmDirectory(Path dir, FileSystem fs, if (committed.remove(childPath.toString())) continue; // A good file. deleteUncommitedFile(childPath, fs); } else if (!child.isDirectory()) { - if (childPath.getName().endsWith(MANIFEST_EXTENSION)) continue; + // TODO# needed? if (childPath.getName().endsWith(MANIFEST_EXTENSION)) continue; if (committed.contains(childPath.toString())) { throw new HiveException("Union FSOP has commited " + childPath + " outside of union directory" + unionSuffix); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java index d013c6f78969..bd537cda4d05 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateMapper.java @@ -234,7 +234,6 @@ public static void jobClose(Path outputPath, boolean success, JobConf job, ) throws HiveException, IOException { FileSystem fs = outputPath.getFileSystem(job); Path backupPath = backupOutputPath(fs, outputPath, job); - // TODO# special case - what is this about? Utilities.mvFileToFinalPath(outputPath, job, success, LOG, dynPartCtx, null, reporter); fs.delete(backupPath, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 30b22d70eb2f..0a298954a458 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1853,7 +1853,6 @@ private Set getValidPartitionsInPath( } else { // The non-MM path only finds new partitions, as it is looking at the temp path. // To produce the same effect, we will find all the partitions affected by this write ID. - // TODO# how would this work with multi-insert into the same table? how does the existing one work? 
leafStatus = Utilities.getMmDirectoryCandidates( fs, loadPath, numDP, numLB, null, mmWriteId); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java index 3c3770937434..3a38a6de7c7a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java @@ -218,7 +218,6 @@ private void pushOperatorsAboveUnion(UnionOperator union, // each parent List fileDescLists = new ArrayList(); - // TODO# special case #N - unions for (Operator parent : parents) { FileSinkDesc fileSinkDesc = (FileSinkDesc) fileSinkOp.getConf().clone(); fileSinkDesc.setDirName(new Path(parentDirName, parent.getIdentifier())); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index e1fc10365c05..0c160acf46eb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -65,7 +65,7 @@ public class GenTezProcContext implements NodeProcessorCtx{ public final ParseContext parseContext; public final HiveConf conf; - public final List> moveTask; // TODO# + public final List> moveTask; // rootTasks is the entry point for all generated tasks public final List> rootTasks; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index d09e40108af0..9b2f005679da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -208,7 +208,6 @@ public void compile(final ParseContext pCtx, final List tsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mvTask.add(tsk); // Check to see if we are stale'ing any indexes and auto-update them if we want From 0f7f4ed83fffb355666ebf0a0a259872156a133c Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 25 Oct 2016 14:06:14 -0700 Subject: [PATCH 20/24] HIVE-14953 : don't use globStatus on S3 in MM tables (Sergey Shelukhin) --- .../hadoop/hive/common/ValidWriteIds.java | 5 +- .../org/apache/hadoop/hive/conf/HiveConf.java | 4 + .../apache/hadoop/hive/ql/exec/Utilities.java | 174 +++++++++++++----- .../apache/hadoop/hive/ql/metadata/Hive.java | 35 ++-- 4 files changed, 150 insertions(+), 68 deletions(-) diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java index 6b382472ef16..c61b63aad799 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -122,18 +122,17 @@ public static String getMmFilePrefix(long mmWriteId) { public static class IdPathFilter implements PathFilter { - private final String mmDirName, tmpPrefix; + private final String mmDirName; private final boolean isMatch; public IdPathFilter(long writeId, boolean isMatch) { this.mmDirName = ValidWriteIds.getMmFilePrefix(writeId); - this.tmpPrefix = "_tmp." 
+ mmDirName; this.isMatch = isMatch; } @Override public boolean accept(Path path) { String name = path.getName(); - return isMatch == (name.equals(mmDirName) || name.equals(tmpPrefix)); + return isMatch == name.equals(mmDirName); } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 8a00f07500dd..6848811b343e 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -3141,6 +3141,10 @@ public static enum ConfVars { "MM write ID will not be removed up for that long after it has been aborted;\n" + "this is to work around potential races e.g. with FS visibility, when deleting files."), + + HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true, + "Whether to use listFiles (optimized on S3) instead of globStatus when on S3."), + HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", "hive.security.authenticator.manager,hive.security.authorization.manager," + "hive.security.metastore.authorization.manager,hive.security.metastore.authenticator.manager," + diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index a7050ab00cc1..e0af81ead1be 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -85,8 +85,10 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveInterruptCallback; @@ -1524,6 +1526,18 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] ? conf.getTable().getNumBuckets() : 0; return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf, null); } + + private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws IOException { + FileStatus[] items = fs.listStatus(path); + // remove empty directory since DP insert should not generate empty partitions. + // empty directories could be generated by crashed Task/ScriptOperator + if (items.length != 0) return false; + if (!fs.delete(path, true)) { + LOG.error("Cannot delete empty directory " + path); + throw new IOException("Cannot delete empty directory " + path); + } + return true; + } public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, int dpLevels, int numBuckets, Configuration hconf, Long mmWriteId) throws IOException { @@ -1535,21 +1549,15 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] if (dpLevels > 0) { FileStatus parts[] = fileStats; for (int i = 0; i < parts.length; ++i) { - assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() + assert parts[i].isDirectory() : "dynamic partition " + parts[i].getPath() + " is not a directory"; - Utilities.LOG14535.info("removeTempOrDuplicateFiles looking at DP " + parts[i].getPath()); - FileStatus[] items = fs.listStatus(parts[i].getPath()); - - // remove empty directory since DP insert should not generate empty partitions. 
- // empty directories could be generated by crashed Task/ScriptOperator - if (items.length == 0) { - if (!fs.delete(parts[i].getPath(), true)) { - LOG.error("Cannot delete empty directory " + parts[i].getPath()); - throw new IOException("Cannot delete empty directory " + parts[i].getPath()); - } + Path path = parts[i].getPath(); + Utilities.LOG14535.info("removeTempOrDuplicateFiles looking at DP " + path); + if (removeEmptyDpDirectory(fs, path)) { parts[i] = null; continue; } + FileStatus[] items = fs.listStatus(path); if (mmWriteId != null) { Path mmDir = parts[i].getPath(); @@ -1575,8 +1583,7 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] throw new IOException("Unexpected directories for non-DP MM: " + Arrays.toString(items)); } Path mmDir = items[0].getPath(); - if (!items[0].isDirectory() - || !mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { + if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { throw new IOException("Unexpected non-MM directory " + mmDir); } Utilities.LOG14535.info( @@ -3803,31 +3810,98 @@ private static void tryDelete(FileSystem fs, Path path) { } } - public static FileStatus[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels, - int lbLevels, PathFilter filter, long mmWriteId) throws IOException { + public static Path[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels, + int lbLevels, PathFilter filter, long mmWriteId, Configuration conf) throws IOException { + int skipLevels = dpLevels + lbLevels; + if (filter == null) { + filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + } + if (skipLevels == 0) { + return statusToPath(fs.listStatus(path, filter)); + } + if (fs.getScheme().equalsIgnoreCase("s3a") + && HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_AVOID_GLOBSTATUS_ON_S3)) { + return getMmDirectoryCandidatesRecursive(fs, path, skipLevels, filter); + } + return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, mmWriteId); + } + + private static Path[] statusToPath(FileStatus[] statuses) { + if (statuses == null) return null; + Path[] paths = new Path[statuses.length]; + for (int i = 0; i < statuses.length; ++i) { + paths[i] = statuses[i].getPath(); + } + return paths; + } + + private static Path[] getMmDirectoryCandidatesRecursive(FileSystem fs, + Path path, int skipLevels, PathFilter filter) throws IOException { + String lastRelDir = null; + HashSet results = new HashSet(); + String relRoot = Path.getPathWithoutSchemeAndAuthority(path).toString(); + if (!relRoot.endsWith(Path.SEPARATOR)) { + relRoot += Path.SEPARATOR; + } + RemoteIterator allFiles = fs.listFiles(path, true); + while (allFiles.hasNext()) { + LocatedFileStatus lfs = allFiles.next(); + Path dirPath = Path.getPathWithoutSchemeAndAuthority(lfs.getPath()); + String dir = dirPath.toString(); + if (!dir.startsWith(relRoot)) { + throw new IOException("Path " + lfs.getPath() + " is not under " + relRoot + + " (when shortened to " + dir + ")"); + } + String subDir = dir.substring(relRoot.length()); + Utilities.LOG14535.info("Looking at " + subDir + " from " + lfs.getPath()); + // If sorted, we'll skip a bunch of files. + if (lastRelDir != null && subDir.startsWith(lastRelDir)) continue; + int startIx = skipLevels > 0 ? 
+      int startIx = skipLevels > 0 ? -1 : 0;
+      for (int i = 0; i < skipLevels; ++i) {
+        startIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1);
+        if (startIx == -1) {
+          Utilities.LOG14535.info("Expected level of nesting (" + skipLevels + ") is not"
+              + " present in " + subDir + " (from " + lfs.getPath() + ")");
+          break;
+        }
+      }
+      if (startIx == -1) continue;
+      int endIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1);
+      if (endIx == -1) {
+        Utilities.LOG14535.info("Expected level of nesting (" + (skipLevels + 1) + ") is not"
+            + " present in " + subDir + " (from " + lfs.getPath() + ")");
+        continue;
+      }
+      lastRelDir = subDir = subDir.substring(0, endIx);
+      Path candidate = new Path(relRoot, subDir);
+      Utilities.LOG14535.info("Considering MM directory candidate " + candidate);
+      if (!filter.accept(candidate)) continue;
+      results.add(fs.makeQualified(candidate));
+    }
+    return results.toArray(new Path[results.size()]);
+  }
+
+  private static Path[] getMmDirectoryCandidatesGlobStatus(FileSystem fs,
+      Path path, int skipLevels, PathFilter filter, long mmWriteId) throws IOException {
     StringBuilder sb = new StringBuilder(path.toUri().getPath());
-    for (int i = 0; i < dpLevels + lbLevels; i++) {
+    for (int i = 0; i < skipLevels; i++) {
       sb.append(Path.SEPARATOR).append("*");
     }
     sb.append(Path.SEPARATOR).append(ValidWriteIds.getMmFilePrefix(mmWriteId));
-    Utilities.LOG14535.info("Looking for files via: " + sb.toString());
     Path pathPattern = new Path(path, sb.toString());
-    if (filter == null) {
-      // TODO: do we need this? Likely yes; we don't want mm_10 when we use ".../mm_1" pattern.
-      filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
-    }
-    return filter == null ? fs.globStatus(pathPattern) : fs.globStatus(pathPattern, filter);
+    Utilities.LOG14535.info("Looking for files via: " + pathPattern);
+    return statusToPath(fs.globStatus(pathPattern, filter));
   }
 
   private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir,
       int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter,
-      long mmWriteId) throws IOException {
-    FileStatus[] files = getMmDirectoryCandidates(
-        fs, specPath, dpLevels, lbLevels, filter, mmWriteId);
+      long mmWriteId, Configuration conf) throws IOException {
+    Path[] files = getMmDirectoryCandidates(
+        fs, specPath, dpLevels, lbLevels, filter, mmWriteId, conf);
     if (files != null) {
-      for (FileStatus status : files) {
-        Utilities.LOG14535.info("Deleting " + status.getPath() + " on failure");
-        tryDelete(fs, status.getPath());
+      for (Path path : files) {
+        Utilities.LOG14535.info("Deleting " + path + " on failure");
+        tryDelete(fs, path);
       }
     }
     Utilities.LOG14535.info("Deleting " + manifestDir + " on failure");
@@ -3882,15 +3956,15 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
     if (!success) {
       ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
       tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels,
-          unionSuffix, filter, mmWriteId);
+          unionSuffix, filter, mmWriteId, hconf);
       return;
     }
 
     Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")");
-    FileStatus[] files = fs.listStatus(manifestDir);
+    FileStatus[] manifestFiles = fs.listStatus(manifestDir);
     List<Path> manifests = new ArrayList<>();
-    if (files != null) {
-      for (FileStatus status : files) {
+    if (manifestFiles != null) {
+      for (FileStatus status : manifestFiles) {
         Path path = status.getPath();
         if (path.getName().endsWith(MANIFEST_EXTENSION)) {
           Utilities.LOG14535.info("Reading manifest " + path);
@@ -3901,21 +3975,13 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
 
     Utilities.LOG14535.info("Looking for files in: " + specPath);
     ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true);
-    files = getMmDirectoryCandidates(
-        fs, specPath, dpLevels, lbLevels, filter, mmWriteId);
-    ArrayList<FileStatus> mmDirectories = new ArrayList<>();
+    Path[] files = getMmDirectoryCandidates(
+        fs, specPath, dpLevels, lbLevels, filter, mmWriteId, hconf);
+    ArrayList<Path> mmDirectories = new ArrayList<>();
     if (files != null) {
-      for (FileStatus status : files) {
-        Path path = status.getPath();
+      for (Path path : files) {
         Utilities.LOG14535.info("Looking at path: " + path);
-        if (!status.isDirectory()) {
-          if (!path.getName().endsWith(MANIFEST_EXTENSION)) {
-            Utilities.LOG14535.warn("Unknown file found, deleting: " + path);
-            tryDelete(fs, path);
-          }
-        } else {
-          mmDirectories.add(status);
-        }
+        mmDirectories.add(path);
       }
     }
 
@@ -3944,8 +4010,8 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
       }
     }
 
-    for (FileStatus status : mmDirectories) {
-      cleanMmDirectory(status.getPath(), fs, unionSuffix, committed);
+    for (Path path : mmDirectories) {
+      cleanMmDirectory(path, fs, unionSuffix, committed);
     }
 
     if (!committed.isEmpty()) {
@@ -3957,7 +4023,12 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
     // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing,
     // so maintain parity here by not calling it at all.
     if (lbLevels != 0) return;
-    FileStatus[] finalResults = mmDirectories.toArray(new FileStatus[mmDirectories.size()]);
+    // Create fake file statuses to avoid querying the file system. removeTempOrDuplicateFiles
+    // doesn't need to check anything except path and directory status for MM directories.
+    FileStatus[] finalResults = new FileStatus[mmDirectories.size()];
+    for (int i = 0; i < mmDirectories.size(); ++i) {
+      finalResults[i] = new PathOnlyFileStatus(mmDirectories.get(i));
+    }
     List<Path> emptyBuckets = Utilities.removeTempOrDuplicateFiles(
         fs, finalResults, dpLevels, mbc == null ? 0 : mbc.numBuckets, hconf, mmWriteId);
     // create empty buckets if necessary
@@ -3967,6 +4038,12 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con
     }
   }
 
+  private static final class PathOnlyFileStatus extends FileStatus {
+    public PathOnlyFileStatus(Path path) {
+      super(0, true, 0, 0, 0, path);
+    }
+  }
+
   private static void cleanMmDirectory(Path dir, FileSystem fs,
       String unionSuffix, HashSet<String> committed) throws IOException, HiveException {
     for (FileStatus child : fs.listStatus(dir)) {
@@ -3975,7 +4052,6 @@ private static void cleanMmDirectory(Path dir, FileSystem fs,
         if (committed.remove(childPath.toString())) continue; // A good file.
         deleteUncommitedFile(childPath, fs);
       } else if (!child.isDirectory()) {
-        // TODO# needed?
if (childPath.getName().endsWith(MANIFEST_EXTENSION)) continue; if (committed.contains(childPath.toString())) { throw new HiveException("Union FSOP has commited " + childPath + " outside of union directory" + unionSuffix); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 0a298954a458..c7ac452634c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -1589,6 +1589,7 @@ public Partition loadPartition(Path loadPath, Table tbl, Map par List newFiles = null; PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin("MoveTask", "FileMoves"); + // TODO: this assumes both paths are qualified; which they are, currently. if (mmWriteId != null && loadPath.equals(newPartPath)) { // MM insert query, move itself is a no-op. Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath + " (MM)"); @@ -1705,7 +1706,7 @@ private List listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws FileSystem srcFs; try { srcFs = loadPath.getFileSystem(conf); - srcs = srcFs.globStatus(loadPath); + srcs = srcFs.listStatus(loadPath); } catch (IOException e) { LOG.error("Error listing files", e); throw new HiveException(e); @@ -1847,29 +1848,30 @@ private Set getValidPartitionsInPath( Set validPartitions = new HashSet(); try { FileSystem fs = loadPath.getFileSystem(conf); - FileStatus[] leafStatus = null; if (mmWriteId == null) { - leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); + FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); + // Check for empty partitions + for (FileStatus s : leafStatus) { + if (!s.isDirectory()) { + throw new HiveException("partition " + s.getPath() + " is not a directory!"); + } + Path dpPath = s.getPath(); + Utilities.LOG14535.info("Found DP " + dpPath); + validPartitions.add(dpPath); + } } else { // The non-MM path only finds new partitions, as it is looking at the temp path. // To produce the same effect, we will find all the partitions affected by this write ID. - leafStatus = Utilities.getMmDirectoryCandidates( - fs, loadPath, numDP, numLB, null, mmWriteId); - } - // Check for empty partitions - for (FileStatus s : leafStatus) { - if (mmWriteId == null && !s.isDirectory()) { - throw new HiveException("partition " + s.getPath() + " is not a directory!"); - } - Path dpPath = s.getPath(); - if (mmWriteId != null) { - dpPath = dpPath.getParent(); // Skip the MM directory that we have found. + Path[] leafStatus = Utilities.getMmDirectoryCandidates( + fs, loadPath, numDP, numLB, null, mmWriteId, conf); + for (Path p : leafStatus) { + Path dpPath = p.getParent(); // Skip the MM directory that we have found. for (int i = 0; i < numLB; ++i) { dpPath = dpPath.getParent(); // Now skip the LB directories, if any... } + Utilities.LOG14535.info("Found DP " + dpPath); + validPartitions.add(dpPath); } - Utilities.LOG14535.info("Found DP " + dpPath); - validPartitions.add(dpPath); } } catch (IOException e) { throw new HiveException(e); @@ -2047,6 +2049,7 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) { newFiles = Collections.synchronizedList(new ArrayList()); } + // TODO: this assumes both paths are qualified; which they are, currently. 
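+      // Illustrative caveat (example paths assumed): Path#equals is textual, so an
+      // unqualified "/warehouse/t1" never equals "hdfs://nn:8020/warehouse/t1" even when
+      // both name the same location; FileSystem#makeQualified is the usual way to
+      // normalize both sides if this assumption ever stops holding.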
if (mmWriteId != null && loadPath.equals(tbl.getPath())) { Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath()); if (replace) { From c050e691df38df7b2866109a680437b683b2c9ab Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 25 Oct 2016 17:51:05 -0700 Subject: [PATCH 21/24] HIVE-15064 : fix explain for MM tables - don't output for non-MM tables (Sergey Shelukhin) --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 1b7d325066a3..86e5f3c74a88 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -144,8 +144,8 @@ public boolean getReplace() { } @Explain(displayName = "micromanaged table") - public boolean isMmTable() { - return mmWriteId != null; + public Boolean isMmTable() { + return mmWriteId != null? true : null; } public void setReplace(boolean replace) { From 38cd0a67cf6af795b874239996e6d3edd5b5df16 Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 25 Oct 2016 18:20:15 -0700 Subject: [PATCH 22/24] HIVE-15064 : fix explain for MM tables - don't output for non-MM tables (Sergey Shelukhin) --- .../java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 86e5f3c74a88..7039f1f37568 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -144,10 +144,14 @@ public boolean getReplace() { } @Explain(displayName = "micromanaged table") - public Boolean isMmTable() { + public Boolean isMmTableExplain() { return mmWriteId != null? true : null; } + public boolean isMmTable() { + return mmWriteId != null; + } + public void setReplace(boolean replace) { this.replace = replace; } From 36ad3a405daceb2ce0c1f7979778be03a4ff9bbb Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Tue, 25 Oct 2016 19:26:36 -0700 Subject: [PATCH 23/24] HIVE-14990 : run all tests for MM tables and fix the issues that are found (Sergey Shelukhin) --- .../org/apache/hadoop/hive/metastore/MetaStoreUtils.java | 6 ++++++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java | 1 + .../apache/hadoop/hive/ql/io/CombineHiveInputFormat.java | 7 +++++-- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index c2ce2590f59b..28fcfa8727b3 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1894,4 +1894,10 @@ public static boolean isMmTable(Map params) { String value = params.get(hive_metastoreConstants.TABLE_IS_MM); return value != null && value.equalsIgnoreCase("true"); } + + public static boolean isMmTable(Properties params) { + // TODO: perhaps it should be a 3rd value for 'transactional'? 
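+    // Example (mirrors the tests in this series): a table created with
+    //   tblproperties('hivecommit'='true')
+    // is treated as micromanaged here; 'false' or an absent key means a regular table.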
+ String value = params.getProperty(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index e0af81ead1be..6774d4d160d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -3961,6 +3961,7 @@ public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Con } Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")"); + // TODO# may be wrong if there are no splits (empty insert/CTAS) FileStatus[] manifestFiles = fs.listStatus(manifestDir); List manifests = new ArrayList<>(); if (manifestFiles != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java index e91064b9c75e..59d6142c01e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.log.PerfLogger; @@ -104,8 +105,10 @@ public Set call() throws Exception { Class inputFormatClass = part.getInputFileFormatClass(); InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, conf); - if (inputFormat instanceof AvoidSplitCombination && - ((AvoidSplitCombination) inputFormat).shouldSkipCombine(paths[i + start], conf)) { + boolean isAvoidSplitCombine = inputFormat instanceof AvoidSplitCombination && + ((AvoidSplitCombination) inputFormat).shouldSkipCombine(paths[i + start], conf); + boolean isMmTable = MetaStoreUtils.isMmTable(part.getTableDesc().getProperties()); + if (isAvoidSplitCombine || isMmTable) { if (LOG.isDebugEnabled()) { LOG.debug("The path [" + paths[i + start] + "] is being parked for HiveInputFormat.getSplits"); From b143f5ce3cf9546ff49eb658a18db43ac319583d Mon Sep 17 00:00:00 2001 From: Sergey Shelukhin Date: Wed, 26 Oct 2016 18:44:30 -0700 Subject: [PATCH 24/24] HIVE-14990 : run all tests for MM tables and fix the issues that are found - issue with FetchOperator (Sergey Shelukhin) --- .../hadoop/hive/ql/exec/FetchOperator.java | 31 +++++++++++++++ .../hadoop/hive/ql/io/HiveInputFormat.java | 39 ++++++++++++------- 2 files changed, 55 insertions(+), 15 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 7375cd453fef..f89372cfd471 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -36,7 +36,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader; @@ -76,6 +78,7 @@ import org.slf4j.LoggerFactory; 
import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; /** * FetchTask implementation. @@ -125,6 +128,7 @@ public class FetchOperator implements Serializable { private transient StructObjectInspector outputOI; private transient Object[] row; + private transient Map writeIdMap; public FetchOperator(FetchWork work, JobConf job) throws HiveException { this(work, job, null, null); @@ -369,6 +373,9 @@ protected FetchInputFormatSplit[] getNextSplits() throws Exception { Class formatter = currDesc.getInputFileFormatClass(); Utilities.copyTableJobPropertiesToConf(currDesc.getTableDesc(), job); InputFormat inputFormat = getInputFormatFromCache(formatter, job); + String inputs = processCurrPathForMmWriteIds(inputFormat); + if (inputs == null) return null; + job.set("mapred.input.dir", inputs); InputSplit[] splits = inputFormat.getSplits(job, 1); FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length]; @@ -385,6 +392,30 @@ protected FetchInputFormatSplit[] getNextSplits() throws Exception { return null; } + private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOException { + if (inputFormat instanceof HiveInputFormat) { + return StringUtils.escapeString(currPath.toString()); // No need to process here. + } + if (writeIdMap == null) { + writeIdMap = new HashMap(); + } + // No need to check for MM table - if it is, the IDs should be in the job config. + ValidWriteIds ids = HiveInputFormat.extractWriteIds(writeIdMap, job, currDesc.getTableName()); + if (ids != null) { + Utilities.LOG14535.info("Observing " + currDesc.getTableName() + ": " + ids); + } + + Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, ids); + if (dirs == null || dirs.length == 0) { + return null; // No valid inputs. This condition is logged inside the call. + } + StringBuffer str = new StringBuffer(StringUtils.escapeString(dirs[0].toString())); + for(int i = 1; i < dirs.length;i++) { + str.append(",").append(StringUtils.escapeString(dirs[i].toString())); + } + return str.toString(); + } + private FetchInputFormatSplit[] splitSampling(SplitSample splitSample, FetchInputFormatSplit[] splits) { long totalSize = 0; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index a539799e1262..428093cc8e3b 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -364,19 +364,11 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job pushFilters(conf, tableScan); } - if (writeIds == null) { - FileInputFormat.setInputPaths(conf, dirs.toArray(new Path[dirs.size()])); - } else { - List finalPaths = new ArrayList<>(dirs.size()); - for (Path dir : dirs) { - processForWriteIds(dir, conf, writeIds, finalPaths); - } - if (finalPaths.isEmpty()) { - LOG.warn("No valid inputs found in " + dirs); - return; - } - FileInputFormat.setInputPaths(conf, finalPaths.toArray(new Path[finalPaths.size()])); + Path[] finalDirs = processPathsForMmRead(dirs, conf, writeIds); + if (finalDirs == null) { + return; // No valid inputs. 
}
+    FileInputFormat.setInputPaths(conf, finalDirs);
     conf.setInputFormat(inputFormat.getClass());
 
     int headerCount = 0;
@@ -396,7 +388,24 @@
     }
   }
 
-  private void processForWriteIds(Path dir, JobConf conf,
+  public static Path[] processPathsForMmRead(List<Path> dirs, JobConf conf,
+      ValidWriteIds writeIds) throws IOException {
+    if (writeIds == null) {
+      return dirs.toArray(new Path[dirs.size()]);
+    } else {
+      List<Path> finalPaths = new ArrayList<>(dirs.size());
+      for (Path dir : dirs) {
+        processForWriteIds(dir, conf, writeIds, finalPaths);
+      }
+      if (finalPaths.isEmpty()) {
+        LOG.warn("No valid inputs found in " + dirs);
+        return null;
+      }
+      return finalPaths.toArray(new Path[finalPaths.size()]);
+    }
+  }
+
+  private static void processForWriteIds(Path dir, JobConf conf,
       ValidWriteIds writeIds, List<Path> finalPaths) throws IOException {
     FileSystem fs = dir.getFileSystem(conf);
     Utilities.LOG14535.warn("Checking " + dir + " (root) for inputs");
@@ -413,7 +422,7 @@ private void processForWriteIds(Path dir, JobConf conf,
     }
   }
 
-  private void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds,
+  private static void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds,
       LinkedList<Path> subdirs, List<Path> finalPaths) {
     Path path = file.getPath();
     Utilities.LOG14535.warn("Checking " + path + " for inputs");
@@ -561,7 +570,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     return result.toArray(new HiveInputSplit[result.size()]);
   }
 
-  private static ValidWriteIds extractWriteIds(Map<String, ValidWriteIds> writeIdMap,
+  public static ValidWriteIds extractWriteIds(Map<String, ValidWriteIds> writeIdMap,
       JobConf newjob, String tableName) {
     if (StringUtils.isBlank(tableName)) return null;
     ValidWriteIds writeIds = writeIdMap.get(tableName);