diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java index 1d734f986031..95a553b85609 100644 --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java @@ -330,9 +330,14 @@ public static String unescapePathName(String path) { */ public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, List results) throws IOException { + listStatusRecursively(fs, fileStatus, HIDDEN_FILES_PATH_FILTER, results); + } + public static void listStatusRecursively(FileSystem fs, FileStatus fileStatus, + PathFilter filter, List results) throws IOException { if (fileStatus.isDir()) { - for (FileStatus stat : fs.listStatus(fileStatus.getPath(), HIDDEN_FILES_PATH_FILTER)) { + for (FileStatus stat : fs.listStatus(fileStatus.getPath(), filter)) { - listStatusRecursively(fs, stat, results); + listStatusRecursively(fs, stat, filter, results); } } else { diff --git a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java index 7c9d72fbd2d0..745a868d3ff4 100644 --- a/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/HiveStatsUtils.java @@ -19,16 +19,20 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.LinkedList; import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + /** * HiveStatsUtils. * A collection of utilities used for hive statistics. 
@@ -50,15 +54,26 @@ public class HiveStatsUtils { * @return array of FileStatus * @throws IOException */ - public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs) + public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem fs) throws IOException { + return getFileStatusRecurse(path, level, fs, FileUtils.HIDDEN_FILES_PATH_FILTER, false); + } + + public static FileStatus[] getFileStatusRecurse( + Path path, int level, FileSystem fs, PathFilter filter) throws IOException { + return getFileStatusRecurse(path, level, fs, filter, false); + } + + public static FileStatus[] getFileStatusRecurse( + Path path, int level, FileSystem fs, PathFilter filter, boolean allLevelsBelow) + throws IOException { // if level is <0, the return all files/directories under the specified path - if ( level < 0) { + if (level < 0) { List result = new ArrayList(); try { FileStatus fileStatus = fs.getFileStatus(path); - FileUtils.listStatusRecursively(fs, fileStatus, result); + FileUtils.listStatusRecursively(fs, fileStatus, filter, result); } catch (IOException e) { // globStatus() API returns empty FileStatus[] when the specified path // does not exist. But getFileStatus() throw IOException. 
To mimic the @@ -75,7 +90,31 @@ public static FileStatus[] getFileStatusRecurse(Path path, int level, FileSystem sb.append(Path.SEPARATOR).append("*"); } Path pathPattern = new Path(path, sb.toString()); - return fs.globStatus(pathPattern, FileUtils.HIDDEN_FILES_PATH_FILTER); + if (!allLevelsBelow) { + return fs.globStatus(pathPattern, filter); + } + LinkedList queue = new LinkedList<>(); + List results = new ArrayList(); + for (FileStatus status : fs.globStatus(pathPattern)) { + if (filter.accept(status.getPath())) { + results.add(status); + } + if (status.isDirectory()) { + queue.add(status); + } + } + while (!queue.isEmpty()) { + FileStatus status = queue.poll(); + for (FileStatus child : fs.listStatus(status.getPath())) { + if (filter.accept(child.getPath())) { + results.add(child); + } + if (child.isDirectory()) { + queue.add(child); + } + } + } + return results.toArray(new FileStatus[results.size()]); } public static int getNumBitVectorsForNDVEstimation(Configuration conf) throws Exception { diff --git a/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java new file mode 100644 index 000000000000..c61b63aad799 --- /dev/null +++ b/common/src/java/org/apache/hadoop/hive/common/ValidWriteIds.java @@ -0,0 +1,171 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.common; + +import java.util.Arrays; +import java.util.HashSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ValidWriteIds { + public static final ValidWriteIds NO_WRITE_IDS = new ValidWriteIds(-1, -1, false, null); + + public static final String MM_PREFIX = "mm"; + + private final static Logger LOG = LoggerFactory.getLogger(ValidWriteIds.class); + + private static final String VALID_WRITEIDS_PREFIX = "hive.valid.write.ids."; + private final long lowWatermark, highWatermark; + private final boolean areIdsValid; + private final HashSet ids; + private String source = null; + + public ValidWriteIds( + long lowWatermark, long highWatermark, boolean areIdsValid, HashSet ids) { + this.lowWatermark = lowWatermark; + this.highWatermark = highWatermark; + this.areIdsValid = areIdsValid; + this.ids = ids; + } + + public static ValidWriteIds createFromConf(Configuration conf, String dbName, String tblName) { + return createFromConf(conf, dbName + "." + tblName); + } + + public static ValidWriteIds createFromConf(Configuration conf, String fullTblName) { + String idStr = conf.get(createConfKey(fullTblName), null); + if (idStr == null || idStr.isEmpty()) return null; + return new ValidWriteIds(idStr); + } + + private static String createConfKey(String dbName, String tblName) { + return createConfKey(dbName + "." 
+ tblName); + } + + private static String createConfKey(String fullName) { + return VALID_WRITEIDS_PREFIX + fullName; + } + + private ValidWriteIds(String src) { + // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64 + String[] values = src.split(":"); + highWatermark = Long.parseLong(values[0]); + lowWatermark = Long.parseLong(values[1]); + if (values.length > 2) { + areIdsValid = Long.parseLong(values[2]) > 0; + ids = new HashSet(); + for(int i = 3; i < values.length; ++i) { + ids.add(Long.parseLong(values[i])); + } + } else { + areIdsValid = false; + ids = null; + } + } + + public void addToConf(Configuration conf, String dbName, String tblName) { + if (source == null) { + source = toString(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Setting " + createConfKey(dbName, tblName) + " => " + source); + } + conf.set(createConfKey(dbName, tblName), source); + } + + public String toString() { + // TODO: lifted from ACID config implementation... optimize if needed? e.g. ranges, base64 + StringBuilder buf = new StringBuilder(); + buf.append(highWatermark); + buf.append(':'); + buf.append(lowWatermark); + if (ids != null) { + buf.append(':'); + buf.append(areIdsValid ? 
1 : 0); + for (long id : ids) { + buf.append(':'); + buf.append(id); + } + } + return buf.toString(); + } + + public boolean isValid(long writeId) { + if (writeId < 0) throw new RuntimeException("Incorrect write ID " + writeId); + if (writeId <= lowWatermark) return true; + if (writeId >= highWatermark) return false; + return ids != null && (areIdsValid == ids.contains(writeId)); + } + + public static String getMmFilePrefix(long mmWriteId) { + return MM_PREFIX + "_" + mmWriteId; + } + + + public static class IdPathFilter implements PathFilter { + private final String mmDirName; + private final boolean isMatch; + public IdPathFilter(long writeId, boolean isMatch) { + this.mmDirName = ValidWriteIds.getMmFilePrefix(writeId); + this.isMatch = isMatch; + } + + @Override + public boolean accept(Path path) { + String name = path.getName(); + return isMatch == name.equals(mmDirName); + } + } + + public static class AnyIdDirFilter implements PathFilter { + @Override + public boolean accept(Path path) { + String name = path.getName(); + if (!name.startsWith(MM_PREFIX + "_")) return false; + String idStr = name.substring(MM_PREFIX.length() + 1); + try { + Long.parseLong(idStr); + } catch (NumberFormatException ex) { + return false; + } + return true; + } + } + public static Long extractWriteId(Path file) { + String fileName = file.getName(); + String[] parts = fileName.split("_", 3); + if (parts.length < 2 || !MM_PREFIX.equals(parts[0])) { + LOG.info("Cannot extract write ID for a MM table: " + file + + " (" + Arrays.toString(parts) + ")"); + return null; + } + long writeId = -1; + try { + writeId = Long.parseLong(parts[1]); + } catch (NumberFormatException ex) { + LOG.info("Cannot extract write ID for a MM table: " + file + + "; parsing " + parts[1] + " got " + ex.getMessage()); + return null; + } + return writeId; + } +} \ No newline at end of file diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java 
b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 6f168b57f1b0..6848811b343e 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -297,7 +297,10 @@ private static URL checkConfigFile(File f) { HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL, HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY, HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL, - HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS + HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS, + HiveConf.ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL, + HiveConf.ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT, + HiveConf.ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT }; /** @@ -1206,6 +1209,8 @@ public static enum ConfVars { HIVETESTMODE("hive.test.mode", false, "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.", false), + HIVEEXIMTESTMODE("hive.exim.test.mode", false, + "The subset of test mode that only enables custom path handling for ExIm.", false), HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_", "In test mode, specfies prefixes for the output table", false), HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32, @@ -1784,10 +1789,12 @@ public static enum ConfVars { HIVE_TXN_OPERATIONAL_PROPERTIES("hive.txn.operational.properties", 0, "Sets the operational properties that control the appropriate behavior for various\n" - + "versions of the Hive ACID subsystem. Setting it to zero will turn on the legacy mode\n" - + "for ACID, while setting it to one will enable a split-update feature found in the newer\n" - + "version of Hive ACID subsystem. Mostly it is intended to be used as an internal property\n" - + "for future versions of ACID. (See HIVE-14035 for details.)"), + + "versions of the Hive ACID subsystem. Mostly it is intended to be used as an internal property\n" + + "for future versions of ACID. 
(See HIVE-14035 for details.)\n" + + "0: Turn on the legacy mode for ACID\n" + + "1: Enable split-update feature found in the newer version of Hive ACID subsystem\n" + + "2: Hash-based merge, which combines delta files using GRACE hash join based approach (not implemented)\n" + + "3: Make the table 'quarter-acid' as it only supports insert. But it doesn't require ORC or bucketing."), HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" + "current open transactions reach this limit, future open transaction requests will be \n" + @@ -3117,6 +3124,26 @@ public static enum ConfVars { "Log tracing id that can be used by upstream clients for tracking respective logs. " + "Truncated to " + LOG_PREFIX_LENGTH + " characters. Defaults to use auto-generated session id."), + HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL("hive.metastore.mm.thread.scan.interval", "900s", + new TimeValidator(TimeUnit.SECONDS), + "MM table housekeeping thread interval in this metastore instance. 0 to disable."), + + HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT("hive.metastore.mm.heartbeat.timeout", "1800s", + new TimeValidator(TimeUnit.SECONDS), + "MM write ID times out after this long if a heartbeat is not sent. Currently disabled."), + + HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT("hive.metastore.mm.absolute.timeout", "7d", + new TimeValidator(TimeUnit.SECONDS), + "MM write ID cannot be outstanding for more than this long."), + + HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD("hive.metastore.mm.aborted.grace.period", "1d", + new TimeValidator(TimeUnit.SECONDS), + "MM write ID will not be cleaned up for that long after it has been aborted;\n" + + "this is to work around potential races e.g. 
with FS visibility, when deleting files."), + + + HIVE_MM_AVOID_GLOBSTATUS_ON_S3("hive.mm.avoid.s3.globstatus", true, + "Whether to use listFiles (optimized on S3) instead of globStatus when on S3."), HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", "hive.security.authenticator.manager,hive.security.authorization.manager," + diff --git a/common/src/test/org/apache/hive/common/util/MockFileSystem.java b/common/src/test/org/apache/hive/common/util/MockFileSystem.java new file mode 100644 index 000000000000..e65fd33d047c --- /dev/null +++ b/common/src/test/org/apache/hive/common/util/MockFileSystem.java @@ -0,0 +1,622 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hive.common.util; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FSInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.RemoteIterator; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Progressable; + +public class MockFileSystem extends FileSystem { + final List files = new ArrayList(); + final Map fileStatusMap = new HashMap<>(); + Path workingDir = new Path("/"); + // statics for when the mock fs is created via FileSystem.get + private static String blockedUgi = null; + private final static List globalFiles = new ArrayList(); + protected Statistics statistics; + public boolean allowDelete = false; + + public MockFileSystem() { + // empty + } + + @Override + public void initialize(URI uri, Configuration conf) { + setConf(conf); + statistics = getStatistics("mock", getClass()); + } + + public MockFileSystem(Configuration conf, MockFile... 
files) { + setConf(conf); + this.files.addAll(Arrays.asList(files)); + statistics = getStatistics("mock", getClass()); + } + + public static void setBlockedUgi(String s) { + blockedUgi = s; + } + + public void clear() { + files.clear(); + } + + @Override + public URI getUri() { + try { + return new URI("mock:///"); + } catch (URISyntaxException err) { + throw new IllegalArgumentException("huh?", err); + } + } + + // increments file modification time + public void touch(MockFile file) { + if (fileStatusMap.containsKey(file)) { + FileStatus fileStatus = fileStatusMap.get(file); + FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), + fileStatus.getReplication(), fileStatus.getBlockSize(), + fileStatus.getModificationTime() + 1, fileStatus.getAccessTime(), + fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), + fileStatus.getPath()); + fileStatusMap.put(file, fileStatusNew); + } + } + + @SuppressWarnings("serial") + public static class MockAccessDenied extends IOException { + } + + @Override + public FSDataInputStream open(Path path, int i) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + MockFile file = findFile(path); + if (file != null) return new FSDataInputStream(new MockInputStream(file)); + throw new IOException("File not found: " + path); + } + + public MockFile findFile(Path path) { + for (MockFile file: files) { + if (file.path.equals(path)) { + return file; + } + } + for (MockFile file: globalFiles) { + if (file.path.equals(path)) { + return file; + } + } + return null; + } + + private void checkAccess() throws IOException { + if (blockedUgi == null) return; + if (!blockedUgi.equals(UserGroupInformation.getCurrentUser().getShortUserName())) return; + throw new MockAccessDenied(); + } + + @Override + public FSDataOutputStream create(Path path, FsPermission fsPermission, + boolean overwrite, int bufferSize, + short replication, long blockSize, + Progressable progressable + ) 
throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + MockFile file = findFile(path); + if (file == null) { + file = new MockFile(path.toString(), (int) blockSize, new byte[0]); + files.add(file); + } + return new MockOutputStream(file); + } + + @Override + public FSDataOutputStream append(Path path, int bufferSize, + Progressable progressable + ) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return create(path, FsPermission.getDefault(), true, bufferSize, + (short) 3, 256 * 1024, progressable); + } + + @Override + public boolean rename(Path path, Path path2) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return false; + } + + @Override + public boolean delete(Path path) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return false; + } + + @Override + public boolean delete(Path path, boolean isRecursive) throws IOException { + statistics.incrementWriteOps(1); + checkAccess(); + return allowDelete && isRecursive && deleteMatchingFiles(files, path.toString()); + } + + @Override + public RemoteIterator listLocatedStatus(final Path f) + throws IOException { + return new RemoteIterator() { + private Iterator iterator = listLocatedFileStatuses(f).iterator(); + + @Override + public boolean hasNext() throws IOException { + return iterator.hasNext(); + } + + @Override + public LocatedFileStatus next() throws IOException { + return iterator.next(); + } + }; + } + + private List listLocatedFileStatuses(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + List result = new ArrayList<>(); + String pathname = path.toString(); + String pathnameAsDir = pathname + "/"; + Set dirs = new TreeSet(); + MockFile file = findFile(path); + if (file != null) { + result.add(createLocatedStatus(file)); + return result; + } + findMatchingLocatedFiles(files, pathnameAsDir, dirs, result); + 
findMatchingLocatedFiles(globalFiles, pathnameAsDir, dirs, result); + // for each directory add it once + for(String dir: dirs) { + result.add(createLocatedDirectory(new MockPath(this, pathnameAsDir + dir))); + } + return result; + } + + @Override + public FileStatus[] listStatus(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + List result = new ArrayList(); + String pathname = path.toString(); + String pathnameAsDir = pathname + "/"; + Set dirs = new TreeSet(); + MockFile file = findFile(path); + if (file != null) { + return new FileStatus[]{createStatus(file)}; + } + findMatchingFiles(files, pathnameAsDir, dirs, result); + findMatchingFiles(globalFiles, pathnameAsDir, dirs, result); + // for each directory add it once + for(String dir: dirs) { + result.add(createDirectory(new MockPath(this, pathnameAsDir + dir))); + } + return result.toArray(new FileStatus[result.size()]); + } + + private void findMatchingFiles( + List files, String pathnameAsDir, Set dirs, List result) { + for (MockFile file: files) { + String filename = file.path.toString(); + if (filename.startsWith(pathnameAsDir)) { + String tail = filename.substring(pathnameAsDir.length()); + int nextSlash = tail.indexOf('/'); + if (nextSlash > 0) { + dirs.add(tail.substring(0, nextSlash)); + } else { + result.add(createStatus(file)); + } + } + } + } + + private boolean deleteMatchingFiles(List files, String path) { + Iterator fileIter = files.iterator(); + boolean result = true; + while (fileIter.hasNext()) { + MockFile file = fileIter.next(); + String filename = file.path.toString(); + if (!filename.startsWith(path)) continue; + if (filename.length() <= path.length() || filename.charAt(path.length()) != '/') continue; + if (file.cannotDelete) { + result = false; + continue; + } + assert !file.isDeleted; + file.isDeleted = true; + fileIter.remove(); + } + return result; + } + + private void findMatchingLocatedFiles( + List files, String 
pathnameAsDir, Set dirs, List result) + throws IOException { + for (MockFile file: files) { + String filename = file.path.toString(); + if (filename.startsWith(pathnameAsDir)) { + String tail = filename.substring(pathnameAsDir.length()); + int nextSlash = tail.indexOf('/'); + if (nextSlash > 0) { + dirs.add(tail.substring(0, nextSlash)); + } else { + result.add(createLocatedStatus(file)); + } + } + } + } + + @Override + public void setWorkingDirectory(Path path) { + workingDir = path; + } + + @Override + public Path getWorkingDirectory() { + return workingDir; + } + + @Override + public boolean mkdirs(Path path, FsPermission fsPermission) { + statistics.incrementWriteOps(1); + return false; + } + + private FileStatus createStatus(MockFile file) { + if (fileStatusMap.containsKey(file)) { + return fileStatusMap.get(file); + } + FileStatus fileStatus = new FileStatus(file.length, false, 1, file.blockSize, 0, 0, + FsPermission.createImmutable((short) 644), "owen", "group", + file.path); + fileStatusMap.put(file, fileStatus); + return fileStatus; + } + + private FileStatus createDirectory(Path dir) { + return new FileStatus(0, true, 0, 0, 0, 0, + FsPermission.createImmutable((short) 755), "owen", "group", dir); + } + + private LocatedFileStatus createLocatedStatus(MockFile file) throws IOException { + FileStatus fileStatus = createStatus(file); + return new LocatedFileStatus(fileStatus, + getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); + } + + private LocatedFileStatus createLocatedDirectory(Path dir) throws IOException { + FileStatus fileStatus = createDirectory(dir); + return new LocatedFileStatus(fileStatus, + getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); + } + + @Override + public FileStatus getFileStatus(Path path) throws IOException { + statistics.incrementReadOps(1); + checkAccess(); + path = path.makeQualified(this); + String pathnameAsDir = path.toString() + "/"; + MockFile file = findFile(path); + if (file != 
null) return createStatus(file); + for (MockFile dir : files) { + if (dir.path.toString().startsWith(pathnameAsDir)) { + return createDirectory(path); + } + } + for (MockFile dir : globalFiles) { + if (dir.path.toString().startsWith(pathnameAsDir)) { + return createDirectory(path); + } + } + throw new FileNotFoundException("File " + path + " does not exist"); + } + + @Override + public BlockLocation[] getFileBlockLocations(FileStatus stat, + long start, long len) throws IOException { + return getFileBlockLocationsImpl(stat, start, len, true); + } + + private BlockLocation[] getFileBlockLocationsImpl(final FileStatus stat, final long start, + final long len, + final boolean updateStats) throws IOException { + if (updateStats) { + statistics.incrementReadOps(1); + } + checkAccess(); + List result = new ArrayList(); + MockFile file = findFile(stat.getPath()); + if (file != null) { + for(MockBlock block: file.blocks) { + if (getOverlap(block.offset, block.length, start, len) > 0) { + String[] topology = new String[block.hosts.length]; + for(int i=0; i < topology.length; ++i) { + topology[i] = "/rack/ " + block.hosts[i]; + } + result.add(new BlockLocation(block.hosts, block.hosts, + topology, block.offset, block.length)); + } + } + return result.toArray(new BlockLocation[result.size()]); + } + return new BlockLocation[0]; + } + + + /** + * Compute the number of bytes that overlap between the two ranges. 
+ * @param offset1 start of range1 + * @param length1 length of range1 + * @param offset2 start of range2 + * @param length2 length of range2 + * @return the number of bytes in the overlap range + */ + private static long getOverlap(long offset1, long length1, long offset2, long length2) { + // c/p from OrcInputFormat + long end1 = offset1 + length1; + long end2 = offset2 + length2; + if (end2 <= offset1 || end1 <= offset2) { + return 0; + } else { + return Math.min(end1, end2) - Math.max(offset1, offset2); + } + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("mockFs{files:["); + for(int i=0; i < files.size(); ++i) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(files.get(i)); + } + buffer.append("]}"); + return buffer.toString(); + } + + public static void addGlobalFile(MockFile mockFile) { + globalFiles.add(mockFile); + } + + public static void clearGlobalFiles() { + globalFiles.clear(); + } + + + public static class MockBlock { + int offset; + int length; + final String[] hosts; + + public MockBlock(String... hosts) { + this.hosts = hosts; + } + + public void setOffset(int offset) { + this.offset = offset; + } + + public void setLength(int length) { + this.length = length; + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("block{offset: "); + buffer.append(offset); + buffer.append(", length: "); + buffer.append(length); + buffer.append(", hosts: ["); + for(int i=0; i < hosts.length; i++) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(hosts[i]); + } + buffer.append("]}"); + return buffer.toString(); + } + } + + public static class MockFile { + public final Path path; + public int blockSize; + public int length; + public MockBlock[] blocks; + public byte[] content; + public boolean cannotDelete = false; + // This is purely for testing convenience; has no bearing on FS operations such as list. 
+ public boolean isDeleted = false; + + public MockFile(String path, int blockSize, byte[] content, + MockBlock... blocks) { + this.path = new Path(path); + this.blockSize = blockSize; + this.blocks = blocks; + this.content = content; + this.length = content.length; + int offset = 0; + for(MockBlock block: blocks) { + block.offset = offset; + block.length = Math.min(length - offset, blockSize); + offset += block.length; + } + } + + @Override + public int hashCode() { + return path.hashCode() + 31 * length; + } + + @Override + public boolean equals(final Object obj) { + if (!(obj instanceof MockFile)) { return false; } + return ((MockFile) obj).path.equals(this.path) && ((MockFile) obj).length == this.length; + } + + @Override + public String toString() { + StringBuilder buffer = new StringBuilder(); + buffer.append("mockFile{path: "); + buffer.append(path.toString()); + buffer.append(", blkSize: "); + buffer.append(blockSize); + buffer.append(", len: "); + buffer.append(length); + buffer.append(", blocks: ["); + for(int i=0; i < blocks.length; i++) { + if (i != 0) { + buffer.append(", "); + } + buffer.append(blocks[i]); + } + buffer.append("]}"); + return buffer.toString(); + } + } + + static class MockInputStream extends FSInputStream { + final MockFile file; + int offset = 0; + + public MockInputStream(MockFile file) throws IOException { + this.file = file; + } + + @Override + public void seek(long offset) throws IOException { + this.offset = (int) offset; + } + + @Override + public long getPos() throws IOException { + return offset; + } + + @Override + public boolean seekToNewSource(long l) throws IOException { + return false; + } + + @Override + public int read() throws IOException { + if (offset < file.length) { + return file.content[offset++] & 0xff; + } + return -1; + } + } + + public static class MockPath extends Path { + private final FileSystem fs; + public MockPath(FileSystem fs, String path) { + super(path); + this.fs = fs; + } + @Override + public 
FileSystem getFileSystem(Configuration conf) { + return fs; + } + } + + public static class MockOutputStream extends FSDataOutputStream { + public final MockFile file; + + public MockOutputStream(MockFile file) throws IOException { + super(new DataOutputBuffer(), null); + this.file = file; + } + + /** + * Set the blocks and their location for the file. + * Must be called after the stream is closed or the block length will be + * wrong. + * @param blocks the list of blocks + */ + public void setBlocks(MockBlock... blocks) { + file.blocks = blocks; + int offset = 0; + int i = 0; + while (offset < file.length && i < blocks.length) { + blocks[i].offset = offset; + blocks[i].length = Math.min(file.length - offset, file.blockSize); + offset += blocks[i].length; + i += 1; + } + } + + @Override + public void close() throws IOException { + super.close(); + DataOutputBuffer buf = (DataOutputBuffer) getWrappedStream(); + file.length = buf.getLength(); + file.content = new byte[file.length]; + MockBlock block = new MockBlock("host1"); + block.setLength(file.length); + setBlocks(block); + System.arraycopy(buf.getData(), 0, file.content, 0, file.length); + } + + @Override + public String toString() { + return "Out stream to " + file.toString(); + } + } + + public void addFile(MockFile file) { + files.add(file); + } +} \ No newline at end of file diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 76c16367a070..0c51a68a4838 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -103,7 +103,7 @@ protected void setUp() { db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true); db.createTable(src, cols, null, TextInputFormat.class, IgnoreKeyTextOutputFormat.class); - 
db.loadTable(hadoopDataFile[i], src, false, false, false, false, false); + db.loadTable(hadoopDataFile[i], src, false, false, false, false, false, null); i++; } diff --git a/itests/pom.xml b/itests/pom.xml index e5b54bfac31a..e039d92bf611 100644 --- a/itests/pom.xml +++ b/itests/pom.xml @@ -73,34 +73,6 @@ set -x - /bin/pwd - BASE_DIR=./target - HIVE_ROOT=$BASE_DIR/../../../ - DOWNLOAD_DIR=./../thirdparty - download() { - url=$1; - finalName=$2 - tarName=$(basename $url) - rm -rf $BASE_DIR/$finalName - if [[ ! -f $DOWNLOAD_DIR/$tarName ]] - then - curl -Sso $DOWNLOAD_DIR/$tarName $url - else - local md5File="$tarName".md5sum - curl -Sso $DOWNLOAD_DIR/$md5File "$url".md5sum - cd $DOWNLOAD_DIR - if ! md5sum -c $md5File; then - curl -Sso $DOWNLOAD_DIR/$tarName $url || return 1 - fi - - cd - - fi - tar -zxf $DOWNLOAD_DIR/$tarName -C $BASE_DIR - mv $BASE_DIR/spark-${spark.version}-bin-hadoop2-without-hive $BASE_DIR/$finalName - } - mkdir -p $DOWNLOAD_DIR - download "http://d3jw87u4immizc.cloudfront.net/spark-tarball/spark-${spark.version}-bin-hadoop2-without-hive.tgz" "spark" - cp -f $HIVE_ROOT/data/conf/spark/log4j2.properties $BASE_DIR/spark/conf/ diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml index 240852e619f5..698d467b9c3e 100644 --- a/itests/qtest-spark/pom.xml +++ b/itests/qtest-spark/pom.xml @@ -349,6 +349,38 @@ + + org.codehaus.mojo + build-helper-maven-plugin + ${maven.build-helper.plugin.version} + + + add-test-sources + generate-test-sources + + add-test-source + + + + target/generated-test-sources/java + + + + + + + + + + + spark-test + + + !skipSparkTests + + + + org.apache.maven.plugins maven-antrun-plugin @@ -390,26 +422,8 @@ - - org.codehaus.mojo - build-helper-maven-plugin - ${maven.build-helper.plugin.version} - - - add-test-sources - generate-test-sources - - add-test-source - - - - target/generated-test-sources/java - - - - - - + + diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index 
42cb6f0de455..01a919bc7127 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -305,7 +305,9 @@ struct Table { 11: string viewExpandedText, // expanded view text, null for non-view 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE 13: optional PrincipalPrivilegeSet privileges, - 14: optional bool temporary=false + 14: optional bool temporary=false, + 15: optional i64 mmNextWriteId, + 16: optional i64 mmWatermarkWriteId } struct Partition { @@ -893,6 +895,44 @@ struct CacheFileMetadataRequest { 4: optional bool isAllParts } + +struct GetNextWriteIdRequest { + 1: required string dbName, + 2: required string tblName +} +struct GetNextWriteIdResult { + 1: required i64 writeId +} + +struct FinalizeWriteIdRequest { + 1: required string dbName, + 2: required string tblName, + 3: required i64 writeId, + 4: required bool commit +} +struct FinalizeWriteIdResult { +} + +struct HeartbeatWriteIdRequest { + 1: required string dbName, + 2: required string tblName, + 3: required i64 writeId +} +struct HeartbeatWriteIdResult { +} + +struct GetValidWriteIdsRequest { + 1: required string dbName, + 2: required string tblName +} +struct GetValidWriteIdsResult { + 1: required i64 lowWatermarkId, + 2: required i64 highWatermarkId, + 3: optional bool areIdsValid, + 4: optional list ids +} + + struct GetAllFunctionsResponse { 1: optional list functions } @@ -1442,6 +1482,10 @@ service ThriftHiveMetastore extends fb303.FacebookService ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req) CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req) + GetNextWriteIdResult get_next_write_id(1:GetNextWriteIdRequest req) + FinalizeWriteIdResult finalize_write_id(1:FinalizeWriteIdRequest req) + HeartbeatWriteIdResult heartbeat_write_id(1:HeartbeatWriteIdRequest req) + GetValidWriteIdsResult get_valid_write_ids(1:GetValidWriteIdsRequest req) } // * Note about the DDL_TIME: When creating or altering a table or 
a partition, @@ -1480,5 +1524,7 @@ const string META_TABLE_STORAGE = "storage_handler", const string TABLE_IS_TRANSACTIONAL = "transactional", const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction", const string TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties", +const string TABLE_IS_MM = "hivecommit", + diff --git a/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql new file mode 100644 index 000000000000..cb6e5f6f16f9 --- /dev/null +++ b/metastore/scripts/upgrade/derby/037-HIVE-14637.derby.sql @@ -0,0 +1,6 @@ +ALTER TABLE "TBLS" ADD "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1; +ALTER TABLE "TBLS" ADD "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0; +CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL); +ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); +ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; +CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID"); diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql index ae980e0899df..9da1703dae48 100644 --- a/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-schema-2.2.0.derby.sql @@ -60,7 +60,7 @@ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), " CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, 
"SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR); +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(128), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "MM_WATERMARK_WRITE_ID" BIGINT DEFAULT -1, "MM_NEXT_WRITE_ID" BIGINT DEFAULT 0); CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); @@ -112,6 +112,16 @@ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); +CREATE TABLE "APP"."TBL_WRITES" ("TW_ID" BIGINT NOT NULL, "TBL_ID" BIGINT NOT NULL, "WRITE_ID" BIGINT NOT NULL, "STATE" CHAR(1) NOT NULL, "CREATED" BIGINT NOT NULL, "LAST_HEARTBEAT" BIGINT NOT NULL); + +ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); + +ALTER TABLE "APP"."TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +CREATE UNIQUE INDEX "APP"."UNIQUEWRITE" ON "APP"."TBL_WRITES" ("TBL_ID", "WRITE_ID"); + + + -- ---------------------------------------------- -- DDL Statements for indexes -- ---------------------------------------------- diff --git a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql index 25a5e37c2708..67750a6e2176 100644 --- a/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql +++ b/metastore/scripts/upgrade/derby/upgrade-2.1.0-to-2.2.0.derby.sql @@ -1,3 +1,5 @@ -- Upgrade MetaStore schema from 
2.1.0 to 2.2.0 +RUN '037-HIVE-14637.derby.sql'; + UPDATE "APP".VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; diff --git a/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql new file mode 100644 index 000000000000..9666d2be1d44 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/022-HIVE-14637.mssql.sql @@ -0,0 +1,15 @@ +ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID BIGINT DEFAULT -1; +ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID BIGINT DEFAULT 0; + +CREATE TABLE TBL_WRITES +( + TW_ID BIGINT NOT NULL, + TBL_ID BIGINT NOT NULL, + WRITE_ID BIGINT NOT NULL, + STATE CHAR(1) NOT NULL, + CREATED BIGINT NOT NULL, + LAST_HEARTBEAT BIGINT NOT NULL +); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql index 8735b506f85e..31016e201c31 100644 --- a/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/hive-schema-2.2.0.mssql.sql @@ -358,7 +358,9 @@ CREATE TABLE TBLS TBL_NAME nvarchar(128) NULL, TBL_TYPE nvarchar(128) NULL, VIEW_EXPANDED_TEXT text NULL, - VIEW_ORIGINAL_TEXT text NULL + VIEW_ORIGINAL_TEXT text NULL, + MM_WATERMARK_WRITE_ID BIGINT NULL DEFAULT -1, + MM_NEXT_WRITE_ID BIGINT NULL DEFAULT 0 ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); @@ -591,6 +593,24 @@ CREATE TABLE NOTIFICATION_SEQUENCE ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +CREATE TABLE TBL_WRITES +( + TW_ID BIGINT NOT NULL, + TBL_ID BIGINT NOT NULL, + WRITE_ID BIGINT NOT NULL, + STATE CHAR(1) NOT NULL, + CREATED BIGINT NOT NULL, + LAST_HEARTBEAT BIGINT NOT NULL +); 
+ +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); + +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); + + -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex] diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql index df972065ddef..2e6f39447722 100644 --- a/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql +++ b/metastore/scripts/upgrade/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; +:r 022-HIVE-14637.mssql.sql + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS MESSAGE; diff --git a/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql new file mode 100644 index 000000000000..9e34db2e45c4 --- /dev/null +++ b/metastore/scripts/upgrade/mysql/037-HIVE-14637.mysql.sql @@ -0,0 +1,15 @@ +alter table `TBLS` ADD COLUMN `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1; +alter table `TBLS` ADD COLUMN `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0; + +CREATE TABLE IF NOT EXISTS `TBL_WRITES` +( + `TW_ID` BIGINT NOT NULL, + `TBL_ID` BIGINT NOT NULL, + `WRITE_ID` BIGINT NOT NULL, + `STATE` CHAR(1) NOT NULL, + `CREATED` BIGINT NOT NULL, + `LAST_HEARTBEAT` BIGINT NOT NULL, + PRIMARY KEY (`TW_ID`), + UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`), + CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql 
b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql index 91e221d8db06..3e73008f1de5 100644 --- a/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/hive-schema-2.2.0.mysql.sql @@ -587,6 +587,8 @@ CREATE TABLE IF NOT EXISTS `TBLS` ( `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, `VIEW_EXPANDED_TEXT` mediumtext, `VIEW_ORIGINAL_TEXT` mediumtext, + `MM_WATERMARK_WRITE_ID` bigint(20) DEFAULT -1, + `MM_NEXT_WRITE_ID` bigint(20) DEFAULT 0, PRIMARY KEY (`TBL_ID`), UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), KEY `TBLS_N50` (`SD_ID`), @@ -827,6 +829,20 @@ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE; +CREATE TABLE IF NOT EXISTS `TBL_WRITES` +( + `TW_ID` BIGINT NOT NULL, + `TBL_ID` BIGINT NOT NULL, + `WRITE_ID` BIGINT NOT NULL, + `STATE` CHAR(1) NOT NULL, + `CREATED` BIGINT NOT NULL, + `LAST_HEARTBEAT` BIGINT NOT NULL, + PRIMARY KEY (`TW_ID`), + UNIQUE KEY `UNIQUEWRITE` (`TBL_ID`,`WRITE_ID`), + CONSTRAINT `TBL_WRITES_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + -- ---------------------------- -- Transaction and Lock Tables -- ---------------------------- diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql index de38b58dbe08..6ac1b8931142 100644 --- a/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; +SOURCE 037-HIVE-14637.mysql.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS ' '; diff --git a/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql 
b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql new file mode 100644 index 000000000000..218eefe07583 --- /dev/null +++ b/metastore/scripts/upgrade/oracle/037-HIVE-14637.oracle.sql @@ -0,0 +1,15 @@ +ALTER TABLE TBLS ADD MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1; +ALTER TABLE TBLS ADD MM_NEXT_WRITE_ID NUMBER DEFAULT 0; + +CREATE TABLE TBL_WRITES +( + TW_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL, + WRITE_ID NUMBER NOT NULL, + STATE CHAR(1) NOT NULL, + CREATED NUMBER NOT NULL, + LAST_HEARTBEAT NUMBER NOT NULL +); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql index 39ba7cb3b5e8..5479712ef109 100644 --- a/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/hive-schema-2.2.0.oracle.sql @@ -375,7 +375,9 @@ CREATE TABLE TBLS TBL_NAME VARCHAR2(128) NULL, TBL_TYPE VARCHAR2(128) NULL, VIEW_EXPANDED_TEXT CLOB NULL, - VIEW_ORIGINAL_TEXT CLOB NULL + VIEW_ORIGINAL_TEXT CLOB NULL, + MM_WATERMARK_WRITE_ID NUMBER DEFAULT -1, + MM_NEXT_WRITE_ID NUMBER DEFAULT 0 ); ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); @@ -797,6 +799,21 @@ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAIN CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); +CREATE TABLE TBL_WRITES +( + TW_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL, + WRITE_ID NUMBER NOT NULL, + STATE CHAR(1) NOT NULL, + CREATED NUMBER NOT NULL, + LAST_HEARTBEAT NUMBER NOT NULL +); + +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_PK PRIMARY KEY (TW_ID); + +ALTER TABLE TBL_WRITES ADD CONSTRAINT TBL_WRITES_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY 
DEFERRED ; + +CREATE UNIQUE INDEX UNIQUEWRITE ON TBL_WRITES (TBL_ID, WRITE_ID); ------------------------------ -- Transaction and lock tables diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql index 66784a4e0ec2..8d841d6f7ea5 100644 --- a/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/upgrade-2.1.0-to-2.2.0.oracle.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; +@037-HIVE-14637.oracle.sql; + UPDATE VERSION SET SCHEMA_VERSION='2.2.0', VERSION_COMMENT='Hive release version 2.2.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0' AS Status from dual; diff --git a/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql new file mode 100644 index 000000000000..310f51ec4321 --- /dev/null +++ b/metastore/scripts/upgrade/postgres/036-HIVE-14637.postgres.sql @@ -0,0 +1,16 @@ + +ALTER TABLE "TBLS" ADD COLUMN "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1; +ALTER TABLE "TBLS" ADD COLUMN "MM_NEXT_WRITE_ID" bigint DEFAULT 0; + +CREATE TABLE "TBL_WRITES" +( + "TW_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "WRITE_ID" BIGINT NOT NULL, + "STATE" CHAR(1) NOT NULL, + "CREATED" BIGINT NOT NULL, + "LAST_HEARTBEAT" BIGINT NOT NULL +); +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID"); diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql index 63ac3befc2b4..bc865edfd53d 100644 --- 
a/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/hive-schema-2.2.0.postgres.sql @@ -372,7 +372,9 @@ CREATE TABLE "TBLS" ( "TBL_NAME" character varying(128) DEFAULT NULL::character varying, "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, "VIEW_EXPANDED_TEXT" text, - "VIEW_ORIGINAL_TEXT" text + "VIEW_ORIGINAL_TEXT" text, + "MM_WATERMARK_WRITE_ID" bigint DEFAULT -1, + "MM_NEXT_WRITE_ID" bigint DEFAULT 0 ); @@ -604,6 +606,25 @@ CREATE TABLE "KEY_CONSTRAINTS" CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID"); + + +CREATE TABLE "TBL_WRITES" +( + "TW_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "WRITE_ID" BIGINT NOT NULL, + "STATE" CHAR(1) NOT NULL, + "CREATED" BIGINT NOT NULL, + "LAST_HEARTBEAT" BIGINT NOT NULL +); + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_PK" PRIMARY KEY ("TW_ID"); + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "TBL_WRITES_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; + +ALTER TABLE ONLY "TBL_WRITES" ADD CONSTRAINT "UNIQUEWRITE" UNIQUE ("TBL_ID", "WRITE_ID"); + + -- -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: -- diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql index 0b4591d5aabf..70542b8a6e6a 100644 --- a/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/upgrade-2.1.0-to-2.2.0.postgres.sql @@ -1,5 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.1.0 to 2.2.0'; +\i 036-HIVE-14637.postgres.sql; + UPDATE "VERSION" SET "SCHEMA_VERSION"='2.2.0', "VERSION_COMMENT"='Hive release version 2.2.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 2.1.0 to 2.2.0'; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp 
b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index b4a05b2e61a4..27985f672da3 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size781; - ::apache::thrift::protocol::TType _etype784; - xfer += iprot->readListBegin(_etype784, _size781); - this->success.resize(_size781); - uint32_t _i785; - for (_i785 = 0; _i785 < _size781; ++_i785) + uint32_t _size803; + ::apache::thrift::protocol::TType _etype806; + xfer += iprot->readListBegin(_etype806, _size803); + this->success.resize(_size803); + uint32_t _i807; + for (_i807 = 0; _i807 < _size803; ++_i807) { - xfer += iprot->readString(this->success[_i785]); + xfer += iprot->readString(this->success[_i807]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter786; - for (_iter786 = this->success.begin(); _iter786 != this->success.end(); ++_iter786) + std::vector ::const_iterator _iter808; + for (_iter808 = this->success.begin(); _iter808 != this->success.end(); ++_iter808) { - xfer += oprot->writeString((*_iter786)); + xfer += oprot->writeString((*_iter808)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size787; - ::apache::thrift::protocol::TType _etype790; - xfer += iprot->readListBegin(_etype790, _size787); - (*(this->success)).resize(_size787); - 
uint32_t _i791; - for (_i791 = 0; _i791 < _size787; ++_i791) + uint32_t _size809; + ::apache::thrift::protocol::TType _etype812; + xfer += iprot->readListBegin(_etype812, _size809); + (*(this->success)).resize(_size809); + uint32_t _i813; + for (_i813 = 0; _i813 < _size809; ++_i813) { - xfer += iprot->readString((*(this->success))[_i791]); + xfer += iprot->readString((*(this->success))[_i813]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size792; - ::apache::thrift::protocol::TType _etype795; - xfer += iprot->readListBegin(_etype795, _size792); - this->success.resize(_size792); - uint32_t _i796; - for (_i796 = 0; _i796 < _size792; ++_i796) + uint32_t _size814; + ::apache::thrift::protocol::TType _etype817; + xfer += iprot->readListBegin(_etype817, _size814); + this->success.resize(_size814); + uint32_t _i818; + for (_i818 = 0; _i818 < _size814; ++_i818) { - xfer += iprot->readString(this->success[_i796]); + xfer += iprot->readString(this->success[_i818]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter797; - for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797) + std::vector ::const_iterator _iter819; + for (_iter819 = this->success.begin(); _iter819 != this->success.end(); ++_iter819) { - xfer += oprot->writeString((*_iter797)); + xfer += oprot->writeString((*_iter819)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == 
::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size798; - ::apache::thrift::protocol::TType _etype801; - xfer += iprot->readListBegin(_etype801, _size798); - (*(this->success)).resize(_size798); - uint32_t _i802; - for (_i802 = 0; _i802 < _size798; ++_i802) + uint32_t _size820; + ::apache::thrift::protocol::TType _etype823; + xfer += iprot->readListBegin(_etype823, _size820); + (*(this->success)).resize(_size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - xfer += iprot->readString((*(this->success))[_i802]); + xfer += iprot->readString((*(this->success))[_i824]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size803; - ::apache::thrift::protocol::TType _ktype804; - ::apache::thrift::protocol::TType _vtype805; - xfer += iprot->readMapBegin(_ktype804, _vtype805, _size803); - uint32_t _i807; - for (_i807 = 0; _i807 < _size803; ++_i807) + uint32_t _size825; + ::apache::thrift::protocol::TType _ktype826; + ::apache::thrift::protocol::TType _vtype827; + xfer += iprot->readMapBegin(_ktype826, _vtype827, _size825); + uint32_t _i829; + for (_i829 = 0; _i829 < _size825; ++_i829) { - std::string _key808; - xfer += iprot->readString(_key808); - Type& _val809 = this->success[_key808]; - xfer += _val809.read(iprot); + std::string _key830; + xfer += iprot->readString(_key830); + Type& _val831 = this->success[_key830]; + xfer += _val831.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter810; - for 
(_iter810 = this->success.begin(); _iter810 != this->success.end(); ++_iter810) + std::map ::const_iterator _iter832; + for (_iter832 = this->success.begin(); _iter832 != this->success.end(); ++_iter832) { - xfer += oprot->writeString(_iter810->first); - xfer += _iter810->second.write(oprot); + xfer += oprot->writeString(_iter832->first); + xfer += _iter832->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size811; - ::apache::thrift::protocol::TType _ktype812; - ::apache::thrift::protocol::TType _vtype813; - xfer += iprot->readMapBegin(_ktype812, _vtype813, _size811); - uint32_t _i815; - for (_i815 = 0; _i815 < _size811; ++_i815) + uint32_t _size833; + ::apache::thrift::protocol::TType _ktype834; + ::apache::thrift::protocol::TType _vtype835; + xfer += iprot->readMapBegin(_ktype834, _vtype835, _size833); + uint32_t _i837; + for (_i837 = 0; _i837 < _size833; ++_i837) { - std::string _key816; - xfer += iprot->readString(_key816); - Type& _val817 = (*(this->success))[_key816]; - xfer += _val817.read(iprot); + std::string _key838; + xfer += iprot->readString(_key838); + Type& _val839 = (*(this->success))[_key838]; + xfer += _val839.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size818; - ::apache::thrift::protocol::TType _etype821; - xfer += iprot->readListBegin(_etype821, _size818); - this->success.resize(_size818); - uint32_t _i822; - for (_i822 = 0; _i822 < _size818; ++_i822) + uint32_t _size840; + ::apache::thrift::protocol::TType _etype843; + xfer += iprot->readListBegin(_etype843, _size840); + this->success.resize(_size840); + uint32_t _i844; + for (_i844 = 0; _i844 < 
_size840; ++_i844) { - xfer += this->success[_i822].read(iprot); + xfer += this->success[_i844].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter823; - for (_iter823 = this->success.begin(); _iter823 != this->success.end(); ++_iter823) + std::vector ::const_iterator _iter845; + for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) { - xfer += (*_iter823).write(oprot); + xfer += (*_iter845).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size824; - ::apache::thrift::protocol::TType _etype827; - xfer += iprot->readListBegin(_etype827, _size824); - (*(this->success)).resize(_size824); - uint32_t _i828; - for (_i828 = 0; _i828 < _size824; ++_i828) + uint32_t _size846; + ::apache::thrift::protocol::TType _etype849; + xfer += iprot->readListBegin(_etype849, _size846); + (*(this->success)).resize(_size846); + uint32_t _i850; + for (_i850 = 0; _i850 < _size846; ++_i850) { - xfer += (*(this->success))[_i828].read(iprot); + xfer += (*(this->success))[_i850].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size829; - ::apache::thrift::protocol::TType _etype832; - xfer += iprot->readListBegin(_etype832, _size829); - this->success.resize(_size829); - uint32_t _i833; - for (_i833 = 0; _i833 < _size829; ++_i833) + uint32_t _size851; + 
::apache::thrift::protocol::TType _etype854; + xfer += iprot->readListBegin(_etype854, _size851); + this->success.resize(_size851); + uint32_t _i855; + for (_i855 = 0; _i855 < _size851; ++_i855) { - xfer += this->success[_i833].read(iprot); + xfer += this->success[_i855].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter834; - for (_iter834 = this->success.begin(); _iter834 != this->success.end(); ++_iter834) + std::vector ::const_iterator _iter856; + for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856) { - xfer += (*_iter834).write(oprot); + xfer += (*_iter856).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _etype838; - xfer += iprot->readListBegin(_etype838, _size835); - (*(this->success)).resize(_size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size857; + ::apache::thrift::protocol::TType _etype860; + xfer += iprot->readListBegin(_etype860, _size857); + (*(this->success)).resize(_size857); + uint32_t _i861; + for (_i861 = 0; _i861 < _size857; ++_i861) { - xfer += (*(this->success))[_i839].read(iprot); + xfer += (*(this->success))[_i861].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size840; - ::apache::thrift::protocol::TType _etype843; - xfer += 
iprot->readListBegin(_etype843, _size840); - this->success.resize(_size840); - uint32_t _i844; - for (_i844 = 0; _i844 < _size840; ++_i844) + uint32_t _size862; + ::apache::thrift::protocol::TType _etype865; + xfer += iprot->readListBegin(_etype865, _size862); + this->success.resize(_size862); + uint32_t _i866; + for (_i866 = 0; _i866 < _size862; ++_i866) { - xfer += this->success[_i844].read(iprot); + xfer += this->success[_i866].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter845; - for (_iter845 = this->success.begin(); _iter845 != this->success.end(); ++_iter845) + std::vector ::const_iterator _iter867; + for (_iter867 = this->success.begin(); _iter867 != this->success.end(); ++_iter867) { - xfer += (*_iter845).write(oprot); + xfer += (*_iter867).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - (*(this->success)).resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size868; + ::apache::thrift::protocol::TType _etype871; + xfer += iprot->readListBegin(_etype871, _size868); + (*(this->success)).resize(_size868); + uint32_t _i872; + for (_i872 = 0; _i872 < _size868; ++_i872) { - xfer += (*(this->success))[_i850].read(iprot); + xfer += (*(this->success))[_i872].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t 
ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size851; - ::apache::thrift::protocol::TType _etype854; - xfer += iprot->readListBegin(_etype854, _size851); - this->success.resize(_size851); - uint32_t _i855; - for (_i855 = 0; _i855 < _size851; ++_i855) + uint32_t _size873; + ::apache::thrift::protocol::TType _etype876; + xfer += iprot->readListBegin(_etype876, _size873); + this->success.resize(_size873); + uint32_t _i877; + for (_i877 = 0; _i877 < _size873; ++_i877) { - xfer += this->success[_i855].read(iprot); + xfer += this->success[_i877].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter856; - for (_iter856 = this->success.begin(); _iter856 != this->success.end(); ++_iter856) + std::vector ::const_iterator _iter878; + for (_iter878 = this->success.begin(); _iter878 != this->success.end(); ++_iter878) { - xfer += (*_iter856).write(oprot); + xfer += (*_iter878).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size857; - ::apache::thrift::protocol::TType _etype860; - xfer += iprot->readListBegin(_etype860, _size857); - (*(this->success)).resize(_size857); - uint32_t _i861; - for (_i861 = 0; _i861 < _size857; ++_i861) + uint32_t _size879; + ::apache::thrift::protocol::TType _etype882; + xfer += iprot->readListBegin(_etype882, _size879); + (*(this->success)).resize(_size879); + uint32_t _i883; + for (_i883 = 0; _i883 < _size879; ++_i883) { - 
xfer += (*(this->success))[_i861].read(iprot); + xfer += (*(this->success))[_i883].read(iprot); } xfer += iprot->readListEnd(); } @@ -4518,14 +4518,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size862; - ::apache::thrift::protocol::TType _etype865; - xfer += iprot->readListBegin(_etype865, _size862); - this->primaryKeys.resize(_size862); - uint32_t _i866; - for (_i866 = 0; _i866 < _size862; ++_i866) + uint32_t _size884; + ::apache::thrift::protocol::TType _etype887; + xfer += iprot->readListBegin(_etype887, _size884); + this->primaryKeys.resize(_size884); + uint32_t _i888; + for (_i888 = 0; _i888 < _size884; ++_i888) { - xfer += this->primaryKeys[_i866].read(iprot); + xfer += this->primaryKeys[_i888].read(iprot); } xfer += iprot->readListEnd(); } @@ -4538,14 +4538,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size867; - ::apache::thrift::protocol::TType _etype870; - xfer += iprot->readListBegin(_etype870, _size867); - this->foreignKeys.resize(_size867); - uint32_t _i871; - for (_i871 = 0; _i871 < _size867; ++_i871) + uint32_t _size889; + ::apache::thrift::protocol::TType _etype892; + xfer += iprot->readListBegin(_etype892, _size889); + this->foreignKeys.resize(_size889); + uint32_t _i893; + for (_i893 = 0; _i893 < _size889; ++_i893) { - xfer += this->foreignKeys[_i871].read(iprot); + xfer += this->foreignKeys[_i893].read(iprot); } xfer += iprot->readListEnd(); } @@ -4578,10 +4578,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); - std::vector ::const_iterator _iter872; - for 
(_iter872 = this->primaryKeys.begin(); _iter872 != this->primaryKeys.end(); ++_iter872) + std::vector ::const_iterator _iter894; + for (_iter894 = this->primaryKeys.begin(); _iter894 != this->primaryKeys.end(); ++_iter894) { - xfer += (*_iter872).write(oprot); + xfer += (*_iter894).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4590,10 +4590,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::write(::apache: xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); - std::vector ::const_iterator _iter873; - for (_iter873 = this->foreignKeys.begin(); _iter873 != this->foreignKeys.end(); ++_iter873) + std::vector ::const_iterator _iter895; + for (_iter895 = this->foreignKeys.begin(); _iter895 != this->foreignKeys.end(); ++_iter895) { - xfer += (*_iter873).write(oprot); + xfer += (*_iter895).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4621,10 +4621,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->primaryKeys)).size())); - std::vector ::const_iterator _iter874; - for (_iter874 = (*(this->primaryKeys)).begin(); _iter874 != (*(this->primaryKeys)).end(); ++_iter874) + std::vector ::const_iterator _iter896; + for (_iter896 = (*(this->primaryKeys)).begin(); _iter896 != (*(this->primaryKeys)).end(); ++_iter896) { - xfer += (*_iter874).write(oprot); + xfer += (*_iter896).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4633,10 +4633,10 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_pargs::write(::apache xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast((*(this->foreignKeys)).size())); - std::vector ::const_iterator _iter875; - for (_iter875 = (*(this->foreignKeys)).begin(); _iter875 != (*(this->foreignKeys)).end(); ++_iter875) + std::vector ::const_iterator _iter897; + for (_iter897 = (*(this->foreignKeys)).begin(); _iter897 != (*(this->foreignKeys)).end(); ++_iter897) { - xfer += (*_iter875).write(oprot); + xfer += (*_iter897).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6055,14 +6055,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size876; - ::apache::thrift::protocol::TType _etype879; - xfer += iprot->readListBegin(_etype879, _size876); - this->success.resize(_size876); - uint32_t _i880; - for (_i880 = 0; _i880 < _size876; ++_i880) + uint32_t _size898; + ::apache::thrift::protocol::TType _etype901; + xfer += iprot->readListBegin(_etype901, _size898); + this->success.resize(_size898); + uint32_t _i902; + for (_i902 = 0; _i902 < _size898; ++_i902) { - xfer += iprot->readString(this->success[_i880]); + xfer += iprot->readString(this->success[_i902]); } xfer += iprot->readListEnd(); } @@ -6101,10 +6101,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter881; - for (_iter881 = this->success.begin(); _iter881 != this->success.end(); ++_iter881) + std::vector ::const_iterator _iter903; + for (_iter903 = this->success.begin(); _iter903 != this->success.end(); ++_iter903) { - xfer += oprot->writeString((*_iter881)); + xfer += oprot->writeString((*_iter903)); } xfer += oprot->writeListEnd(); } @@ -6149,14 +6149,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == 
::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size882; - ::apache::thrift::protocol::TType _etype885; - xfer += iprot->readListBegin(_etype885, _size882); - (*(this->success)).resize(_size882); - uint32_t _i886; - for (_i886 = 0; _i886 < _size882; ++_i886) + uint32_t _size904; + ::apache::thrift::protocol::TType _etype907; + xfer += iprot->readListBegin(_etype907, _size904); + (*(this->success)).resize(_size904); + uint32_t _i908; + for (_i908 = 0; _i908 < _size904; ++_i908) { - xfer += iprot->readString((*(this->success))[_i886]); + xfer += iprot->readString((*(this->success))[_i908]); } xfer += iprot->readListEnd(); } @@ -6326,14 +6326,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size887; - ::apache::thrift::protocol::TType _etype890; - xfer += iprot->readListBegin(_etype890, _size887); - this->success.resize(_size887); - uint32_t _i891; - for (_i891 = 0; _i891 < _size887; ++_i891) + uint32_t _size909; + ::apache::thrift::protocol::TType _etype912; + xfer += iprot->readListBegin(_etype912, _size909); + this->success.resize(_size909); + uint32_t _i913; + for (_i913 = 0; _i913 < _size909; ++_i913) { - xfer += iprot->readString(this->success[_i891]); + xfer += iprot->readString(this->success[_i913]); } xfer += iprot->readListEnd(); } @@ -6372,10 +6372,10 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter892; - for (_iter892 = this->success.begin(); _iter892 != this->success.end(); ++_iter892) + std::vector ::const_iterator _iter914; + for (_iter914 = this->success.begin(); _iter914 != this->success.end(); ++_iter914) { - xfer += 
oprot->writeString((*_iter892)); + xfer += oprot->writeString((*_iter914)); } xfer += oprot->writeListEnd(); } @@ -6420,14 +6420,14 @@ uint32_t ThriftHiveMetastore_get_tables_by_type_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size893; - ::apache::thrift::protocol::TType _etype896; - xfer += iprot->readListBegin(_etype896, _size893); - (*(this->success)).resize(_size893); - uint32_t _i897; - for (_i897 = 0; _i897 < _size893; ++_i897) + uint32_t _size915; + ::apache::thrift::protocol::TType _etype918; + xfer += iprot->readListBegin(_etype918, _size915); + (*(this->success)).resize(_size915); + uint32_t _i919; + for (_i919 = 0; _i919 < _size915; ++_i919) { - xfer += iprot->readString((*(this->success))[_i897]); + xfer += iprot->readString((*(this->success))[_i919]); } xfer += iprot->readListEnd(); } @@ -6502,14 +6502,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size898; - ::apache::thrift::protocol::TType _etype901; - xfer += iprot->readListBegin(_etype901, _size898); - this->tbl_types.resize(_size898); - uint32_t _i902; - for (_i902 = 0; _i902 < _size898; ++_i902) + uint32_t _size920; + ::apache::thrift::protocol::TType _etype923; + xfer += iprot->readListBegin(_etype923, _size920); + this->tbl_types.resize(_size920); + uint32_t _i924; + for (_i924 = 0; _i924 < _size920; ++_i924) { - xfer += iprot->readString(this->tbl_types[_i902]); + xfer += iprot->readString(this->tbl_types[_i924]); } xfer += iprot->readListEnd(); } @@ -6546,10 +6546,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter903; - for 
(_iter903 = this->tbl_types.begin(); _iter903 != this->tbl_types.end(); ++_iter903) + std::vector ::const_iterator _iter925; + for (_iter925 = this->tbl_types.begin(); _iter925 != this->tbl_types.end(); ++_iter925) { - xfer += oprot->writeString((*_iter903)); + xfer += oprot->writeString((*_iter925)); } xfer += oprot->writeListEnd(); } @@ -6581,10 +6581,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter904; - for (_iter904 = (*(this->tbl_types)).begin(); _iter904 != (*(this->tbl_types)).end(); ++_iter904) + std::vector ::const_iterator _iter926; + for (_iter926 = (*(this->tbl_types)).begin(); _iter926 != (*(this->tbl_types)).end(); ++_iter926) { - xfer += oprot->writeString((*_iter904)); + xfer += oprot->writeString((*_iter926)); } xfer += oprot->writeListEnd(); } @@ -6625,14 +6625,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size905; - ::apache::thrift::protocol::TType _etype908; - xfer += iprot->readListBegin(_etype908, _size905); - this->success.resize(_size905); - uint32_t _i909; - for (_i909 = 0; _i909 < _size905; ++_i909) + uint32_t _size927; + ::apache::thrift::protocol::TType _etype930; + xfer += iprot->readListBegin(_etype930, _size927); + this->success.resize(_size927); + uint32_t _i931; + for (_i931 = 0; _i931 < _size927; ++_i931) { - xfer += this->success[_i909].read(iprot); + xfer += this->success[_i931].read(iprot); } xfer += iprot->readListEnd(); } @@ -6671,10 +6671,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter910; - for (_iter910 = this->success.begin(); _iter910 != this->success.end(); ++_iter910) + std::vector ::const_iterator _iter932; + for (_iter932 = this->success.begin(); _iter932 != this->success.end(); ++_iter932) { - xfer += (*_iter910).write(oprot); + xfer += (*_iter932).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6719,14 +6719,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size911; - ::apache::thrift::protocol::TType _etype914; - xfer += iprot->readListBegin(_etype914, _size911); - (*(this->success)).resize(_size911); - uint32_t _i915; - for (_i915 = 0; _i915 < _size911; ++_i915) + uint32_t _size933; + ::apache::thrift::protocol::TType _etype936; + xfer += iprot->readListBegin(_etype936, _size933); + (*(this->success)).resize(_size933); + uint32_t _i937; + for (_i937 = 0; _i937 < _size933; ++_i937) { - xfer += (*(this->success))[_i915].read(iprot); + xfer += (*(this->success))[_i937].read(iprot); } xfer += iprot->readListEnd(); } @@ -6864,14 +6864,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size916; - ::apache::thrift::protocol::TType _etype919; - xfer += iprot->readListBegin(_etype919, _size916); - this->success.resize(_size916); - uint32_t _i920; - for (_i920 = 0; _i920 < _size916; ++_i920) + uint32_t _size938; + ::apache::thrift::protocol::TType _etype941; + xfer += iprot->readListBegin(_etype941, _size938); + this->success.resize(_size938); + uint32_t _i942; + for (_i942 = 0; _i942 < _size938; ++_i942) { - xfer += iprot->readString(this->success[_i920]); + xfer += iprot->readString(this->success[_i942]); } xfer += iprot->readListEnd(); } @@ -6910,10 
+6910,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter921; - for (_iter921 = this->success.begin(); _iter921 != this->success.end(); ++_iter921) + std::vector ::const_iterator _iter943; + for (_iter943 = this->success.begin(); _iter943 != this->success.end(); ++_iter943) { - xfer += oprot->writeString((*_iter921)); + xfer += oprot->writeString((*_iter943)); } xfer += oprot->writeListEnd(); } @@ -6958,14 +6958,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size922; - ::apache::thrift::protocol::TType _etype925; - xfer += iprot->readListBegin(_etype925, _size922); - (*(this->success)).resize(_size922); - uint32_t _i926; - for (_i926 = 0; _i926 < _size922; ++_i926) + uint32_t _size944; + ::apache::thrift::protocol::TType _etype947; + xfer += iprot->readListBegin(_etype947, _size944); + (*(this->success)).resize(_size944); + uint32_t _i948; + for (_i948 = 0; _i948 < _size944; ++_i948) { - xfer += iprot->readString((*(this->success))[_i926]); + xfer += iprot->readString((*(this->success))[_i948]); } xfer += iprot->readListEnd(); } @@ -7275,14 +7275,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size927; - ::apache::thrift::protocol::TType _etype930; - xfer += iprot->readListBegin(_etype930, _size927); - this->tbl_names.resize(_size927); - uint32_t _i931; - for (_i931 = 0; _i931 < _size927; ++_i931) + uint32_t _size949; + ::apache::thrift::protocol::TType _etype952; + xfer += iprot->readListBegin(_etype952, _size949); + this->tbl_names.resize(_size949); + 
uint32_t _i953; + for (_i953 = 0; _i953 < _size949; ++_i953) { - xfer += iprot->readString(this->tbl_names[_i931]); + xfer += iprot->readString(this->tbl_names[_i953]); } xfer += iprot->readListEnd(); } @@ -7315,10 +7315,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter932; - for (_iter932 = this->tbl_names.begin(); _iter932 != this->tbl_names.end(); ++_iter932) + std::vector ::const_iterator _iter954; + for (_iter954 = this->tbl_names.begin(); _iter954 != this->tbl_names.end(); ++_iter954) { - xfer += oprot->writeString((*_iter932)); + xfer += oprot->writeString((*_iter954)); } xfer += oprot->writeListEnd(); } @@ -7346,10 +7346,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter933; - for (_iter933 = (*(this->tbl_names)).begin(); _iter933 != (*(this->tbl_names)).end(); ++_iter933) + std::vector ::const_iterator _iter955; + for (_iter955 = (*(this->tbl_names)).begin(); _iter955 != (*(this->tbl_names)).end(); ++_iter955) { - xfer += oprot->writeString((*_iter933)); + xfer += oprot->writeString((*_iter955)); } xfer += oprot->writeListEnd(); } @@ -7390,14 +7390,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size934; - ::apache::thrift::protocol::TType _etype937; - xfer += iprot->readListBegin(_etype937, _size934); - this->success.resize(_size934); - uint32_t _i938; - for (_i938 = 0; _i938 < _size934; ++_i938) + 
uint32_t _size956; + ::apache::thrift::protocol::TType _etype959; + xfer += iprot->readListBegin(_etype959, _size956); + this->success.resize(_size956); + uint32_t _i960; + for (_i960 = 0; _i960 < _size956; ++_i960) { - xfer += this->success[_i938].read(iprot); + xfer += this->success[_i960].read(iprot); } xfer += iprot->readListEnd(); } @@ -7452,10 +7452,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter939; - for (_iter939 = this->success.begin(); _iter939 != this->success.end(); ++_iter939) + std::vector
::const_iterator _iter961; + for (_iter961 = this->success.begin(); _iter961 != this->success.end(); ++_iter961) { - xfer += (*_iter939).write(oprot); + xfer += (*_iter961).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7508,14 +7508,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size940; - ::apache::thrift::protocol::TType _etype943; - xfer += iprot->readListBegin(_etype943, _size940); - (*(this->success)).resize(_size940); - uint32_t _i944; - for (_i944 = 0; _i944 < _size940; ++_i944) + uint32_t _size962; + ::apache::thrift::protocol::TType _etype965; + xfer += iprot->readListBegin(_etype965, _size962); + (*(this->success)).resize(_size962); + uint32_t _i966; + for (_i966 = 0; _i966 < _size962; ++_i966) { - xfer += (*(this->success))[_i944].read(iprot); + xfer += (*(this->success))[_i966].read(iprot); } xfer += iprot->readListEnd(); } @@ -7701,14 +7701,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size945; - ::apache::thrift::protocol::TType _etype948; - xfer += iprot->readListBegin(_etype948, _size945); - this->success.resize(_size945); - uint32_t _i949; - for (_i949 = 0; _i949 < _size945; ++_i949) + uint32_t _size967; + ::apache::thrift::protocol::TType _etype970; + xfer += iprot->readListBegin(_etype970, _size967); + this->success.resize(_size967); + uint32_t _i971; + for (_i971 = 0; _i971 < _size967; ++_i971) { - xfer += iprot->readString(this->success[_i949]); + xfer += iprot->readString(this->success[_i971]); } xfer += iprot->readListEnd(); } @@ -7763,10 +7763,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->success.size())); - std::vector ::const_iterator _iter950; - for (_iter950 = this->success.begin(); _iter950 != this->success.end(); ++_iter950) + std::vector ::const_iterator _iter972; + for (_iter972 = this->success.begin(); _iter972 != this->success.end(); ++_iter972) { - xfer += oprot->writeString((*_iter950)); + xfer += oprot->writeString((*_iter972)); } xfer += oprot->writeListEnd(); } @@ -7819,14 +7819,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size951; - ::apache::thrift::protocol::TType _etype954; - xfer += iprot->readListBegin(_etype954, _size951); - (*(this->success)).resize(_size951); - uint32_t _i955; - for (_i955 = 0; _i955 < _size951; ++_i955) + uint32_t _size973; + ::apache::thrift::protocol::TType _etype976; + xfer += iprot->readListBegin(_etype976, _size973); + (*(this->success)).resize(_size973); + uint32_t _i977; + for (_i977 = 0; _i977 < _size973; ++_i977) { - xfer += iprot->readString((*(this->success))[_i955]); + xfer += iprot->readString((*(this->success))[_i977]); } xfer += iprot->readListEnd(); } @@ -9160,14 +9160,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size956; - ::apache::thrift::protocol::TType _etype959; - xfer += iprot->readListBegin(_etype959, _size956); - this->new_parts.resize(_size956); - uint32_t _i960; - for (_i960 = 0; _i960 < _size956; ++_i960) + uint32_t _size978; + ::apache::thrift::protocol::TType _etype981; + xfer += iprot->readListBegin(_etype981, _size978); + this->new_parts.resize(_size978); + uint32_t _i982; + for (_i982 = 0; _i982 < _size978; ++_i982) { - xfer += this->new_parts[_i960].read(iprot); + xfer += this->new_parts[_i982].read(iprot); } xfer += iprot->readListEnd(); } @@ -9196,10 +9196,10 @@ uint32_t 
ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter961; - for (_iter961 = this->new_parts.begin(); _iter961 != this->new_parts.end(); ++_iter961) + std::vector ::const_iterator _iter983; + for (_iter983 = this->new_parts.begin(); _iter983 != this->new_parts.end(); ++_iter983) { - xfer += (*_iter961).write(oprot); + xfer += (*_iter983).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9223,10 +9223,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter962; - for (_iter962 = (*(this->new_parts)).begin(); _iter962 != (*(this->new_parts)).end(); ++_iter962) + std::vector ::const_iterator _iter984; + for (_iter984 = (*(this->new_parts)).begin(); _iter984 != (*(this->new_parts)).end(); ++_iter984) { - xfer += (*_iter962).write(oprot); + xfer += (*_iter984).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9435,14 +9435,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size963; - ::apache::thrift::protocol::TType _etype966; - xfer += iprot->readListBegin(_etype966, _size963); - this->new_parts.resize(_size963); - uint32_t _i967; - for (_i967 = 0; _i967 < _size963; ++_i967) + uint32_t _size985; + ::apache::thrift::protocol::TType _etype988; + xfer += iprot->readListBegin(_etype988, _size985); + this->new_parts.resize(_size985); + uint32_t _i989; + for (_i989 = 0; _i989 < _size985; ++_i989) { - xfer += this->new_parts[_i967].read(iprot); 
+ xfer += this->new_parts[_i989].read(iprot); } xfer += iprot->readListEnd(); } @@ -9471,10 +9471,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter968; - for (_iter968 = this->new_parts.begin(); _iter968 != this->new_parts.end(); ++_iter968) + std::vector ::const_iterator _iter990; + for (_iter990 = this->new_parts.begin(); _iter990 != this->new_parts.end(); ++_iter990) { - xfer += (*_iter968).write(oprot); + xfer += (*_iter990).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9498,10 +9498,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter969; - for (_iter969 = (*(this->new_parts)).begin(); _iter969 != (*(this->new_parts)).end(); ++_iter969) + std::vector ::const_iterator _iter991; + for (_iter991 = (*(this->new_parts)).begin(); _iter991 != (*(this->new_parts)).end(); ++_iter991) { - xfer += (*_iter969).write(oprot); + xfer += (*_iter991).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9726,14 +9726,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size970; - ::apache::thrift::protocol::TType _etype973; - xfer += iprot->readListBegin(_etype973, _size970); - this->part_vals.resize(_size970); - uint32_t _i974; - for (_i974 = 0; _i974 < _size970; ++_i974) + uint32_t _size992; + ::apache::thrift::protocol::TType _etype995; + xfer += iprot->readListBegin(_etype995, _size992); + 
this->part_vals.resize(_size992); + uint32_t _i996; + for (_i996 = 0; _i996 < _size992; ++_i996) { - xfer += iprot->readString(this->part_vals[_i974]); + xfer += iprot->readString(this->part_vals[_i996]); } xfer += iprot->readListEnd(); } @@ -9770,10 +9770,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter975; - for (_iter975 = this->part_vals.begin(); _iter975 != this->part_vals.end(); ++_iter975) + std::vector ::const_iterator _iter997; + for (_iter997 = this->part_vals.begin(); _iter997 != this->part_vals.end(); ++_iter997) { - xfer += oprot->writeString((*_iter975)); + xfer += oprot->writeString((*_iter997)); } xfer += oprot->writeListEnd(); } @@ -9805,10 +9805,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter976; - for (_iter976 = (*(this->part_vals)).begin(); _iter976 != (*(this->part_vals)).end(); ++_iter976) + std::vector ::const_iterator _iter998; + for (_iter998 = (*(this->part_vals)).begin(); _iter998 != (*(this->part_vals)).end(); ++_iter998) { - xfer += oprot->writeString((*_iter976)); + xfer += oprot->writeString((*_iter998)); } xfer += oprot->writeListEnd(); } @@ -10280,14 +10280,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size977; - ::apache::thrift::protocol::TType _etype980; - xfer += iprot->readListBegin(_etype980, _size977); - this->part_vals.resize(_size977); - uint32_t _i981; - for 
(_i981 = 0; _i981 < _size977; ++_i981) + uint32_t _size999; + ::apache::thrift::protocol::TType _etype1002; + xfer += iprot->readListBegin(_etype1002, _size999); + this->part_vals.resize(_size999); + uint32_t _i1003; + for (_i1003 = 0; _i1003 < _size999; ++_i1003) { - xfer += iprot->readString(this->part_vals[_i981]); + xfer += iprot->readString(this->part_vals[_i1003]); } xfer += iprot->readListEnd(); } @@ -10332,10 +10332,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter982; - for (_iter982 = this->part_vals.begin(); _iter982 != this->part_vals.end(); ++_iter982) + std::vector ::const_iterator _iter1004; + for (_iter1004 = this->part_vals.begin(); _iter1004 != this->part_vals.end(); ++_iter1004) { - xfer += oprot->writeString((*_iter982)); + xfer += oprot->writeString((*_iter1004)); } xfer += oprot->writeListEnd(); } @@ -10371,10 +10371,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter983; - for (_iter983 = (*(this->part_vals)).begin(); _iter983 != (*(this->part_vals)).end(); ++_iter983) + std::vector ::const_iterator _iter1005; + for (_iter1005 = (*(this->part_vals)).begin(); _iter1005 != (*(this->part_vals)).end(); ++_iter1005) { - xfer += oprot->writeString((*_iter983)); + xfer += oprot->writeString((*_iter1005)); } xfer += oprot->writeListEnd(); } @@ -11177,14 +11177,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - 
uint32_t _size984; - ::apache::thrift::protocol::TType _etype987; - xfer += iprot->readListBegin(_etype987, _size984); - this->part_vals.resize(_size984); - uint32_t _i988; - for (_i988 = 0; _i988 < _size984; ++_i988) + uint32_t _size1006; + ::apache::thrift::protocol::TType _etype1009; + xfer += iprot->readListBegin(_etype1009, _size1006); + this->part_vals.resize(_size1006); + uint32_t _i1010; + for (_i1010 = 0; _i1010 < _size1006; ++_i1010) { - xfer += iprot->readString(this->part_vals[_i988]); + xfer += iprot->readString(this->part_vals[_i1010]); } xfer += iprot->readListEnd(); } @@ -11229,10 +11229,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter989; - for (_iter989 = this->part_vals.begin(); _iter989 != this->part_vals.end(); ++_iter989) + std::vector ::const_iterator _iter1011; + for (_iter1011 = this->part_vals.begin(); _iter1011 != this->part_vals.end(); ++_iter1011) { - xfer += oprot->writeString((*_iter989)); + xfer += oprot->writeString((*_iter1011)); } xfer += oprot->writeListEnd(); } @@ -11268,10 +11268,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter990; - for (_iter990 = (*(this->part_vals)).begin(); _iter990 != (*(this->part_vals)).end(); ++_iter990) + std::vector ::const_iterator _iter1012; + for (_iter1012 = (*(this->part_vals)).begin(); _iter1012 != (*(this->part_vals)).end(); ++_iter1012) { - xfer += oprot->writeString((*_iter990)); + xfer += oprot->writeString((*_iter1012)); } xfer += oprot->writeListEnd(); } @@ 
-11480,14 +11480,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size991; - ::apache::thrift::protocol::TType _etype994; - xfer += iprot->readListBegin(_etype994, _size991); - this->part_vals.resize(_size991); - uint32_t _i995; - for (_i995 = 0; _i995 < _size991; ++_i995) + uint32_t _size1013; + ::apache::thrift::protocol::TType _etype1016; + xfer += iprot->readListBegin(_etype1016, _size1013); + this->part_vals.resize(_size1013); + uint32_t _i1017; + for (_i1017 = 0; _i1017 < _size1013; ++_i1017) { - xfer += iprot->readString(this->part_vals[_i995]); + xfer += iprot->readString(this->part_vals[_i1017]); } xfer += iprot->readListEnd(); } @@ -11540,10 +11540,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter996; - for (_iter996 = this->part_vals.begin(); _iter996 != this->part_vals.end(); ++_iter996) + std::vector ::const_iterator _iter1018; + for (_iter1018 = this->part_vals.begin(); _iter1018 != this->part_vals.end(); ++_iter1018) { - xfer += oprot->writeString((*_iter996)); + xfer += oprot->writeString((*_iter1018)); } xfer += oprot->writeListEnd(); } @@ -11583,10 +11583,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter997; - for (_iter997 = (*(this->part_vals)).begin(); _iter997 != (*(this->part_vals)).end(); ++_iter997) + std::vector ::const_iterator _iter1019; + for (_iter1019 = (*(this->part_vals)).begin(); 
_iter1019 != (*(this->part_vals)).end(); ++_iter1019) { - xfer += oprot->writeString((*_iter997)); + xfer += oprot->writeString((*_iter1019)); } xfer += oprot->writeListEnd(); } @@ -12592,14 +12592,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size998; - ::apache::thrift::protocol::TType _etype1001; - xfer += iprot->readListBegin(_etype1001, _size998); - this->part_vals.resize(_size998); - uint32_t _i1002; - for (_i1002 = 0; _i1002 < _size998; ++_i1002) + uint32_t _size1020; + ::apache::thrift::protocol::TType _etype1023; + xfer += iprot->readListBegin(_etype1023, _size1020); + this->part_vals.resize(_size1020); + uint32_t _i1024; + for (_i1024 = 0; _i1024 < _size1020; ++_i1024) { - xfer += iprot->readString(this->part_vals[_i1002]); + xfer += iprot->readString(this->part_vals[_i1024]); } xfer += iprot->readListEnd(); } @@ -12636,10 +12636,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1003; - for (_iter1003 = this->part_vals.begin(); _iter1003 != this->part_vals.end(); ++_iter1003) + std::vector ::const_iterator _iter1025; + for (_iter1025 = this->part_vals.begin(); _iter1025 != this->part_vals.end(); ++_iter1025) { - xfer += oprot->writeString((*_iter1003)); + xfer += oprot->writeString((*_iter1025)); } xfer += oprot->writeListEnd(); } @@ -12671,10 +12671,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator 
_iter1004; - for (_iter1004 = (*(this->part_vals)).begin(); _iter1004 != (*(this->part_vals)).end(); ++_iter1004) + std::vector ::const_iterator _iter1026; + for (_iter1026 = (*(this->part_vals)).begin(); _iter1026 != (*(this->part_vals)).end(); ++_iter1026) { - xfer += oprot->writeString((*_iter1004)); + xfer += oprot->writeString((*_iter1026)); } xfer += oprot->writeListEnd(); } @@ -12863,17 +12863,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1005; - ::apache::thrift::protocol::TType _ktype1006; - ::apache::thrift::protocol::TType _vtype1007; - xfer += iprot->readMapBegin(_ktype1006, _vtype1007, _size1005); - uint32_t _i1009; - for (_i1009 = 0; _i1009 < _size1005; ++_i1009) + uint32_t _size1027; + ::apache::thrift::protocol::TType _ktype1028; + ::apache::thrift::protocol::TType _vtype1029; + xfer += iprot->readMapBegin(_ktype1028, _vtype1029, _size1027); + uint32_t _i1031; + for (_i1031 = 0; _i1031 < _size1027; ++_i1031) { - std::string _key1010; - xfer += iprot->readString(_key1010); - std::string& _val1011 = this->partitionSpecs[_key1010]; - xfer += iprot->readString(_val1011); + std::string _key1032; + xfer += iprot->readString(_key1032); + std::string& _val1033 = this->partitionSpecs[_key1032]; + xfer += iprot->readString(_val1033); } xfer += iprot->readMapEnd(); } @@ -12934,11 +12934,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1012; - for (_iter1012 = this->partitionSpecs.begin(); _iter1012 != this->partitionSpecs.end(); ++_iter1012) + std::map ::const_iterator _iter1034; + for (_iter1034 = 
this->partitionSpecs.begin(); _iter1034 != this->partitionSpecs.end(); ++_iter1034) { - xfer += oprot->writeString(_iter1012->first); - xfer += oprot->writeString(_iter1012->second); + xfer += oprot->writeString(_iter1034->first); + xfer += oprot->writeString(_iter1034->second); } xfer += oprot->writeMapEnd(); } @@ -12978,11 +12978,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1013; - for (_iter1013 = (*(this->partitionSpecs)).begin(); _iter1013 != (*(this->partitionSpecs)).end(); ++_iter1013) + std::map ::const_iterator _iter1035; + for (_iter1035 = (*(this->partitionSpecs)).begin(); _iter1035 != (*(this->partitionSpecs)).end(); ++_iter1035) { - xfer += oprot->writeString(_iter1013->first); - xfer += oprot->writeString(_iter1013->second); + xfer += oprot->writeString(_iter1035->first); + xfer += oprot->writeString(_iter1035->second); } xfer += oprot->writeMapEnd(); } @@ -13227,17 +13227,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size1014; - ::apache::thrift::protocol::TType _ktype1015; - ::apache::thrift::protocol::TType _vtype1016; - xfer += iprot->readMapBegin(_ktype1015, _vtype1016, _size1014); - uint32_t _i1018; - for (_i1018 = 0; _i1018 < _size1014; ++_i1018) + uint32_t _size1036; + ::apache::thrift::protocol::TType _ktype1037; + ::apache::thrift::protocol::TType _vtype1038; + xfer += iprot->readMapBegin(_ktype1037, _vtype1038, _size1036); + uint32_t _i1040; + for (_i1040 = 0; _i1040 < _size1036; ++_i1040) { - std::string _key1019; - xfer += iprot->readString(_key1019); - std::string& _val1020 = 
this->partitionSpecs[_key1019]; - xfer += iprot->readString(_val1020); + std::string _key1041; + xfer += iprot->readString(_key1041); + std::string& _val1042 = this->partitionSpecs[_key1041]; + xfer += iprot->readString(_val1042); } xfer += iprot->readMapEnd(); } @@ -13298,11 +13298,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter1021; - for (_iter1021 = this->partitionSpecs.begin(); _iter1021 != this->partitionSpecs.end(); ++_iter1021) + std::map ::const_iterator _iter1043; + for (_iter1043 = this->partitionSpecs.begin(); _iter1043 != this->partitionSpecs.end(); ++_iter1043) { - xfer += oprot->writeString(_iter1021->first); - xfer += oprot->writeString(_iter1021->second); + xfer += oprot->writeString(_iter1043->first); + xfer += oprot->writeString(_iter1043->second); } xfer += oprot->writeMapEnd(); } @@ -13342,11 +13342,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter1022; - for (_iter1022 = (*(this->partitionSpecs)).begin(); _iter1022 != (*(this->partitionSpecs)).end(); ++_iter1022) + std::map ::const_iterator _iter1044; + for (_iter1044 = (*(this->partitionSpecs)).begin(); _iter1044 != (*(this->partitionSpecs)).end(); ++_iter1044) { - xfer += oprot->writeString(_iter1022->first); - xfer += oprot->writeString(_iter1022->second); + xfer += oprot->writeString(_iter1044->first); + xfer += oprot->writeString(_iter1044->second); } xfer += 
oprot->writeMapEnd(); } @@ -13403,14 +13403,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1023; - ::apache::thrift::protocol::TType _etype1026; - xfer += iprot->readListBegin(_etype1026, _size1023); - this->success.resize(_size1023); - uint32_t _i1027; - for (_i1027 = 0; _i1027 < _size1023; ++_i1027) + uint32_t _size1045; + ::apache::thrift::protocol::TType _etype1048; + xfer += iprot->readListBegin(_etype1048, _size1045); + this->success.resize(_size1045); + uint32_t _i1049; + for (_i1049 = 0; _i1049 < _size1045; ++_i1049) { - xfer += this->success[_i1027].read(iprot); + xfer += this->success[_i1049].read(iprot); } xfer += iprot->readListEnd(); } @@ -13473,10 +13473,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1028; - for (_iter1028 = this->success.begin(); _iter1028 != this->success.end(); ++_iter1028) + std::vector ::const_iterator _iter1050; + for (_iter1050 = this->success.begin(); _iter1050 != this->success.end(); ++_iter1050) { - xfer += (*_iter1028).write(oprot); + xfer += (*_iter1050).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13533,14 +13533,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1029; - ::apache::thrift::protocol::TType _etype1032; - xfer += iprot->readListBegin(_etype1032, _size1029); - (*(this->success)).resize(_size1029); - uint32_t _i1033; - for (_i1033 = 0; _i1033 < _size1029; ++_i1033) + uint32_t _size1051; + ::apache::thrift::protocol::TType _etype1054; + xfer += iprot->readListBegin(_etype1054, 
_size1051); + (*(this->success)).resize(_size1051); + uint32_t _i1055; + for (_i1055 = 0; _i1055 < _size1051; ++_i1055) { - xfer += (*(this->success))[_i1033].read(iprot); + xfer += (*(this->success))[_i1055].read(iprot); } xfer += iprot->readListEnd(); } @@ -13639,14 +13639,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1034; - ::apache::thrift::protocol::TType _etype1037; - xfer += iprot->readListBegin(_etype1037, _size1034); - this->part_vals.resize(_size1034); - uint32_t _i1038; - for (_i1038 = 0; _i1038 < _size1034; ++_i1038) + uint32_t _size1056; + ::apache::thrift::protocol::TType _etype1059; + xfer += iprot->readListBegin(_etype1059, _size1056); + this->part_vals.resize(_size1056); + uint32_t _i1060; + for (_i1060 = 0; _i1060 < _size1056; ++_i1060) { - xfer += iprot->readString(this->part_vals[_i1038]); + xfer += iprot->readString(this->part_vals[_i1060]); } xfer += iprot->readListEnd(); } @@ -13667,14 +13667,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1039; - ::apache::thrift::protocol::TType _etype1042; - xfer += iprot->readListBegin(_etype1042, _size1039); - this->group_names.resize(_size1039); - uint32_t _i1043; - for (_i1043 = 0; _i1043 < _size1039; ++_i1043) + uint32_t _size1061; + ::apache::thrift::protocol::TType _etype1064; + xfer += iprot->readListBegin(_etype1064, _size1061); + this->group_names.resize(_size1061); + uint32_t _i1065; + for (_i1065 = 0; _i1065 < _size1061; ++_i1065) { - xfer += iprot->readString(this->group_names[_i1043]); + xfer += iprot->readString(this->group_names[_i1065]); } xfer += iprot->readListEnd(); } @@ -13711,10 +13711,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1044; - for (_iter1044 = this->part_vals.begin(); _iter1044 != this->part_vals.end(); ++_iter1044) + std::vector ::const_iterator _iter1066; + for (_iter1066 = this->part_vals.begin(); _iter1066 != this->part_vals.end(); ++_iter1066) { - xfer += oprot->writeString((*_iter1044)); + xfer += oprot->writeString((*_iter1066)); } xfer += oprot->writeListEnd(); } @@ -13727,10 +13727,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1045; - for (_iter1045 = this->group_names.begin(); _iter1045 != this->group_names.end(); ++_iter1045) + std::vector ::const_iterator _iter1067; + for (_iter1067 = this->group_names.begin(); _iter1067 != this->group_names.end(); ++_iter1067) { - xfer += oprot->writeString((*_iter1045)); + xfer += oprot->writeString((*_iter1067)); } xfer += oprot->writeListEnd(); } @@ -13762,10 +13762,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1046; - for (_iter1046 = (*(this->part_vals)).begin(); _iter1046 != (*(this->part_vals)).end(); ++_iter1046) + std::vector ::const_iterator _iter1068; + for (_iter1068 = (*(this->part_vals)).begin(); _iter1068 != (*(this->part_vals)).end(); ++_iter1068) { - xfer += oprot->writeString((*_iter1046)); + xfer += oprot->writeString((*_iter1068)); } xfer += oprot->writeListEnd(); } @@ -13778,10 +13778,10 @@ 
uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1047; - for (_iter1047 = (*(this->group_names)).begin(); _iter1047 != (*(this->group_names)).end(); ++_iter1047) + std::vector ::const_iterator _iter1069; + for (_iter1069 = (*(this->group_names)).begin(); _iter1069 != (*(this->group_names)).end(); ++_iter1069) { - xfer += oprot->writeString((*_iter1047)); + xfer += oprot->writeString((*_iter1069)); } xfer += oprot->writeListEnd(); } @@ -14340,14 +14340,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1048; - ::apache::thrift::protocol::TType _etype1051; - xfer += iprot->readListBegin(_etype1051, _size1048); - this->success.resize(_size1048); - uint32_t _i1052; - for (_i1052 = 0; _i1052 < _size1048; ++_i1052) + uint32_t _size1070; + ::apache::thrift::protocol::TType _etype1073; + xfer += iprot->readListBegin(_etype1073, _size1070); + this->success.resize(_size1070); + uint32_t _i1074; + for (_i1074 = 0; _i1074 < _size1070; ++_i1074) { - xfer += this->success[_i1052].read(iprot); + xfer += this->success[_i1074].read(iprot); } xfer += iprot->readListEnd(); } @@ -14394,10 +14394,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1053; - for (_iter1053 = this->success.begin(); _iter1053 != this->success.end(); ++_iter1053) + std::vector ::const_iterator _iter1075; + for (_iter1075 = this->success.begin(); _iter1075 != 
this->success.end(); ++_iter1075) { - xfer += (*_iter1053).write(oprot); + xfer += (*_iter1075).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14446,14 +14446,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1054; - ::apache::thrift::protocol::TType _etype1057; - xfer += iprot->readListBegin(_etype1057, _size1054); - (*(this->success)).resize(_size1054); - uint32_t _i1058; - for (_i1058 = 0; _i1058 < _size1054; ++_i1058) + uint32_t _size1076; + ::apache::thrift::protocol::TType _etype1079; + xfer += iprot->readListBegin(_etype1079, _size1076); + (*(this->success)).resize(_size1076); + uint32_t _i1080; + for (_i1080 = 0; _i1080 < _size1076; ++_i1080) { - xfer += (*(this->success))[_i1058].read(iprot); + xfer += (*(this->success))[_i1080].read(iprot); } xfer += iprot->readListEnd(); } @@ -14552,14 +14552,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1059; - ::apache::thrift::protocol::TType _etype1062; - xfer += iprot->readListBegin(_etype1062, _size1059); - this->group_names.resize(_size1059); - uint32_t _i1063; - for (_i1063 = 0; _i1063 < _size1059; ++_i1063) + uint32_t _size1081; + ::apache::thrift::protocol::TType _etype1084; + xfer += iprot->readListBegin(_etype1084, _size1081); + this->group_names.resize(_size1081); + uint32_t _i1085; + for (_i1085 = 0; _i1085 < _size1081; ++_i1085) { - xfer += iprot->readString(this->group_names[_i1063]); + xfer += iprot->readString(this->group_names[_i1085]); } xfer += iprot->readListEnd(); } @@ -14604,10 +14604,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1064; - for (_iter1064 = this->group_names.begin(); _iter1064 != this->group_names.end(); ++_iter1064) + std::vector ::const_iterator _iter1086; + for (_iter1086 = this->group_names.begin(); _iter1086 != this->group_names.end(); ++_iter1086) { - xfer += oprot->writeString((*_iter1064)); + xfer += oprot->writeString((*_iter1086)); } xfer += oprot->writeListEnd(); } @@ -14647,10 +14647,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1065; - for (_iter1065 = (*(this->group_names)).begin(); _iter1065 != (*(this->group_names)).end(); ++_iter1065) + std::vector ::const_iterator _iter1087; + for (_iter1087 = (*(this->group_names)).begin(); _iter1087 != (*(this->group_names)).end(); ++_iter1087) { - xfer += oprot->writeString((*_iter1065)); + xfer += oprot->writeString((*_iter1087)); } xfer += oprot->writeListEnd(); } @@ -14691,14 +14691,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1066; - ::apache::thrift::protocol::TType _etype1069; - xfer += iprot->readListBegin(_etype1069, _size1066); - this->success.resize(_size1066); - uint32_t _i1070; - for (_i1070 = 0; _i1070 < _size1066; ++_i1070) + uint32_t _size1088; + ::apache::thrift::protocol::TType _etype1091; + xfer += iprot->readListBegin(_etype1091, _size1088); + this->success.resize(_size1088); + uint32_t _i1092; + for (_i1092 = 0; _i1092 < _size1088; ++_i1092) { - xfer += this->success[_i1070].read(iprot); + xfer += this->success[_i1092].read(iprot); } xfer += iprot->readListEnd(); } @@ -14745,10 +14745,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1071; - for (_iter1071 = this->success.begin(); _iter1071 != this->success.end(); ++_iter1071) + std::vector ::const_iterator _iter1093; + for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093) { - xfer += (*_iter1071).write(oprot); + xfer += (*_iter1093).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14797,14 +14797,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1072; - ::apache::thrift::protocol::TType _etype1075; - xfer += iprot->readListBegin(_etype1075, _size1072); - (*(this->success)).resize(_size1072); - uint32_t _i1076; - for (_i1076 = 0; _i1076 < _size1072; ++_i1076) + uint32_t _size1094; + ::apache::thrift::protocol::TType _etype1097; + xfer += iprot->readListBegin(_etype1097, _size1094); + (*(this->success)).resize(_size1094); + uint32_t _i1098; + for (_i1098 = 0; _i1098 < _size1094; ++_i1098) { - xfer += (*(this->success))[_i1076].read(iprot); + xfer += (*(this->success))[_i1098].read(iprot); } xfer += iprot->readListEnd(); } @@ -14982,14 +14982,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1077; - ::apache::thrift::protocol::TType _etype1080; - xfer += iprot->readListBegin(_etype1080, _size1077); - this->success.resize(_size1077); - uint32_t _i1081; - for (_i1081 = 0; _i1081 < _size1077; ++_i1081) + uint32_t _size1099; + ::apache::thrift::protocol::TType _etype1102; + xfer += iprot->readListBegin(_etype1102, _size1099); + this->success.resize(_size1099); + 
uint32_t _i1103; + for (_i1103 = 0; _i1103 < _size1099; ++_i1103) { - xfer += this->success[_i1081].read(iprot); + xfer += this->success[_i1103].read(iprot); } xfer += iprot->readListEnd(); } @@ -15036,10 +15036,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1082; - for (_iter1082 = this->success.begin(); _iter1082 != this->success.end(); ++_iter1082) + std::vector ::const_iterator _iter1104; + for (_iter1104 = this->success.begin(); _iter1104 != this->success.end(); ++_iter1104) { - xfer += (*_iter1082).write(oprot); + xfer += (*_iter1104).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15088,14 +15088,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1083; - ::apache::thrift::protocol::TType _etype1086; - xfer += iprot->readListBegin(_etype1086, _size1083); - (*(this->success)).resize(_size1083); - uint32_t _i1087; - for (_i1087 = 0; _i1087 < _size1083; ++_i1087) + uint32_t _size1105; + ::apache::thrift::protocol::TType _etype1108; + xfer += iprot->readListBegin(_etype1108, _size1105); + (*(this->success)).resize(_size1105); + uint32_t _i1109; + for (_i1109 = 0; _i1109 < _size1105; ++_i1109) { - xfer += (*(this->success))[_i1087].read(iprot); + xfer += (*(this->success))[_i1109].read(iprot); } xfer += iprot->readListEnd(); } @@ -15273,14 +15273,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1088; - ::apache::thrift::protocol::TType _etype1091; - xfer += iprot->readListBegin(_etype1091, _size1088); - this->success.resize(_size1088); - 
uint32_t _i1092; - for (_i1092 = 0; _i1092 < _size1088; ++_i1092) + uint32_t _size1110; + ::apache::thrift::protocol::TType _etype1113; + xfer += iprot->readListBegin(_etype1113, _size1110); + this->success.resize(_size1110); + uint32_t _i1114; + for (_i1114 = 0; _i1114 < _size1110; ++_i1114) { - xfer += iprot->readString(this->success[_i1092]); + xfer += iprot->readString(this->success[_i1114]); } xfer += iprot->readListEnd(); } @@ -15319,10 +15319,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1093; - for (_iter1093 = this->success.begin(); _iter1093 != this->success.end(); ++_iter1093) + std::vector ::const_iterator _iter1115; + for (_iter1115 = this->success.begin(); _iter1115 != this->success.end(); ++_iter1115) { - xfer += oprot->writeString((*_iter1093)); + xfer += oprot->writeString((*_iter1115)); } xfer += oprot->writeListEnd(); } @@ -15367,14 +15367,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1094; - ::apache::thrift::protocol::TType _etype1097; - xfer += iprot->readListBegin(_etype1097, _size1094); - (*(this->success)).resize(_size1094); - uint32_t _i1098; - for (_i1098 = 0; _i1098 < _size1094; ++_i1098) + uint32_t _size1116; + ::apache::thrift::protocol::TType _etype1119; + xfer += iprot->readListBegin(_etype1119, _size1116); + (*(this->success)).resize(_size1116); + uint32_t _i1120; + for (_i1120 = 0; _i1120 < _size1116; ++_i1120) { - xfer += iprot->readString((*(this->success))[_i1098]); + xfer += iprot->readString((*(this->success))[_i1120]); } xfer += iprot->readListEnd(); } @@ -15449,14 +15449,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1099; - ::apache::thrift::protocol::TType _etype1102; - xfer += iprot->readListBegin(_etype1102, _size1099); - this->part_vals.resize(_size1099); - uint32_t _i1103; - for (_i1103 = 0; _i1103 < _size1099; ++_i1103) + uint32_t _size1121; + ::apache::thrift::protocol::TType _etype1124; + xfer += iprot->readListBegin(_etype1124, _size1121); + this->part_vals.resize(_size1121); + uint32_t _i1125; + for (_i1125 = 0; _i1125 < _size1121; ++_i1125) { - xfer += iprot->readString(this->part_vals[_i1103]); + xfer += iprot->readString(this->part_vals[_i1125]); } xfer += iprot->readListEnd(); } @@ -15501,10 +15501,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1104; - for (_iter1104 = this->part_vals.begin(); _iter1104 != this->part_vals.end(); ++_iter1104) + std::vector ::const_iterator _iter1126; + for (_iter1126 = this->part_vals.begin(); _iter1126 != this->part_vals.end(); ++_iter1126) { - xfer += oprot->writeString((*_iter1104)); + xfer += oprot->writeString((*_iter1126)); } xfer += oprot->writeListEnd(); } @@ -15540,10 +15540,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1105; - for (_iter1105 = (*(this->part_vals)).begin(); _iter1105 != (*(this->part_vals)).end(); ++_iter1105) + std::vector ::const_iterator _iter1127; + for (_iter1127 = (*(this->part_vals)).begin(); _iter1127 
!= (*(this->part_vals)).end(); ++_iter1127) { - xfer += oprot->writeString((*_iter1105)); + xfer += oprot->writeString((*_iter1127)); } xfer += oprot->writeListEnd(); } @@ -15588,14 +15588,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1106; - ::apache::thrift::protocol::TType _etype1109; - xfer += iprot->readListBegin(_etype1109, _size1106); - this->success.resize(_size1106); - uint32_t _i1110; - for (_i1110 = 0; _i1110 < _size1106; ++_i1110) + uint32_t _size1128; + ::apache::thrift::protocol::TType _etype1131; + xfer += iprot->readListBegin(_etype1131, _size1128); + this->success.resize(_size1128); + uint32_t _i1132; + for (_i1132 = 0; _i1132 < _size1128; ++_i1132) { - xfer += this->success[_i1110].read(iprot); + xfer += this->success[_i1132].read(iprot); } xfer += iprot->readListEnd(); } @@ -15642,10 +15642,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1111; - for (_iter1111 = this->success.begin(); _iter1111 != this->success.end(); ++_iter1111) + std::vector ::const_iterator _iter1133; + for (_iter1133 = this->success.begin(); _iter1133 != this->success.end(); ++_iter1133) { - xfer += (*_iter1111).write(oprot); + xfer += (*_iter1133).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15694,14 +15694,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1112; - ::apache::thrift::protocol::TType _etype1115; - xfer += iprot->readListBegin(_etype1115, _size1112); - (*(this->success)).resize(_size1112); - uint32_t _i1116; - for (_i1116 = 0; _i1116 
< _size1112; ++_i1116) + uint32_t _size1134; + ::apache::thrift::protocol::TType _etype1137; + xfer += iprot->readListBegin(_etype1137, _size1134); + (*(this->success)).resize(_size1134); + uint32_t _i1138; + for (_i1138 = 0; _i1138 < _size1134; ++_i1138) { - xfer += (*(this->success))[_i1116].read(iprot); + xfer += (*(this->success))[_i1138].read(iprot); } xfer += iprot->readListEnd(); } @@ -15784,14 +15784,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1117; - ::apache::thrift::protocol::TType _etype1120; - xfer += iprot->readListBegin(_etype1120, _size1117); - this->part_vals.resize(_size1117); - uint32_t _i1121; - for (_i1121 = 0; _i1121 < _size1117; ++_i1121) + uint32_t _size1139; + ::apache::thrift::protocol::TType _etype1142; + xfer += iprot->readListBegin(_etype1142, _size1139); + this->part_vals.resize(_size1139); + uint32_t _i1143; + for (_i1143 = 0; _i1143 < _size1139; ++_i1143) { - xfer += iprot->readString(this->part_vals[_i1121]); + xfer += iprot->readString(this->part_vals[_i1143]); } xfer += iprot->readListEnd(); } @@ -15820,14 +15820,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1122; - ::apache::thrift::protocol::TType _etype1125; - xfer += iprot->readListBegin(_etype1125, _size1122); - this->group_names.resize(_size1122); - uint32_t _i1126; - for (_i1126 = 0; _i1126 < _size1122; ++_i1126) + uint32_t _size1144; + ::apache::thrift::protocol::TType _etype1147; + xfer += iprot->readListBegin(_etype1147, _size1144); + this->group_names.resize(_size1144); + uint32_t _i1148; + for (_i1148 = 0; _i1148 < _size1144; ++_i1148) { - xfer += iprot->readString(this->group_names[_i1126]); + xfer += iprot->readString(this->group_names[_i1148]); } xfer += iprot->readListEnd(); } @@ -15864,10 
+15864,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1127; - for (_iter1127 = this->part_vals.begin(); _iter1127 != this->part_vals.end(); ++_iter1127) + std::vector ::const_iterator _iter1149; + for (_iter1149 = this->part_vals.begin(); _iter1149 != this->part_vals.end(); ++_iter1149) { - xfer += oprot->writeString((*_iter1127)); + xfer += oprot->writeString((*_iter1149)); } xfer += oprot->writeListEnd(); } @@ -15884,10 +15884,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1128; - for (_iter1128 = this->group_names.begin(); _iter1128 != this->group_names.end(); ++_iter1128) + std::vector ::const_iterator _iter1150; + for (_iter1150 = this->group_names.begin(); _iter1150 != this->group_names.end(); ++_iter1150) { - xfer += oprot->writeString((*_iter1128)); + xfer += oprot->writeString((*_iter1150)); } xfer += oprot->writeListEnd(); } @@ -15919,10 +15919,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1129; - for (_iter1129 = (*(this->part_vals)).begin(); _iter1129 != (*(this->part_vals)).end(); ++_iter1129) + std::vector ::const_iterator _iter1151; + for (_iter1151 = (*(this->part_vals)).begin(); _iter1151 != (*(this->part_vals)).end(); ++_iter1151) { - xfer += 
oprot->writeString((*_iter1129)); + xfer += oprot->writeString((*_iter1151)); } xfer += oprot->writeListEnd(); } @@ -15939,10 +15939,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1130; - for (_iter1130 = (*(this->group_names)).begin(); _iter1130 != (*(this->group_names)).end(); ++_iter1130) + std::vector ::const_iterator _iter1152; + for (_iter1152 = (*(this->group_names)).begin(); _iter1152 != (*(this->group_names)).end(); ++_iter1152) { - xfer += oprot->writeString((*_iter1130)); + xfer += oprot->writeString((*_iter1152)); } xfer += oprot->writeListEnd(); } @@ -15983,14 +15983,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1131; - ::apache::thrift::protocol::TType _etype1134; - xfer += iprot->readListBegin(_etype1134, _size1131); - this->success.resize(_size1131); - uint32_t _i1135; - for (_i1135 = 0; _i1135 < _size1131; ++_i1135) + uint32_t _size1153; + ::apache::thrift::protocol::TType _etype1156; + xfer += iprot->readListBegin(_etype1156, _size1153); + this->success.resize(_size1153); + uint32_t _i1157; + for (_i1157 = 0; _i1157 < _size1153; ++_i1157) { - xfer += this->success[_i1135].read(iprot); + xfer += this->success[_i1157].read(iprot); } xfer += iprot->readListEnd(); } @@ -16037,10 +16037,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1136; - for (_iter1136 = this->success.begin(); _iter1136 
!= this->success.end(); ++_iter1136) + std::vector ::const_iterator _iter1158; + for (_iter1158 = this->success.begin(); _iter1158 != this->success.end(); ++_iter1158) { - xfer += (*_iter1136).write(oprot); + xfer += (*_iter1158).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16089,14 +16089,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1137; - ::apache::thrift::protocol::TType _etype1140; - xfer += iprot->readListBegin(_etype1140, _size1137); - (*(this->success)).resize(_size1137); - uint32_t _i1141; - for (_i1141 = 0; _i1141 < _size1137; ++_i1141) + uint32_t _size1159; + ::apache::thrift::protocol::TType _etype1162; + xfer += iprot->readListBegin(_etype1162, _size1159); + (*(this->success)).resize(_size1159); + uint32_t _i1163; + for (_i1163 = 0; _i1163 < _size1159; ++_i1163) { - xfer += (*(this->success))[_i1141].read(iprot); + xfer += (*(this->success))[_i1163].read(iprot); } xfer += iprot->readListEnd(); } @@ -16179,14 +16179,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1142; - ::apache::thrift::protocol::TType _etype1145; - xfer += iprot->readListBegin(_etype1145, _size1142); - this->part_vals.resize(_size1142); - uint32_t _i1146; - for (_i1146 = 0; _i1146 < _size1142; ++_i1146) + uint32_t _size1164; + ::apache::thrift::protocol::TType _etype1167; + xfer += iprot->readListBegin(_etype1167, _size1164); + this->part_vals.resize(_size1164); + uint32_t _i1168; + for (_i1168 = 0; _i1168 < _size1164; ++_i1168) { - xfer += iprot->readString(this->part_vals[_i1146]); + xfer += iprot->readString(this->part_vals[_i1168]); } xfer += iprot->readListEnd(); } @@ -16231,10 +16231,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += 
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1147; - for (_iter1147 = this->part_vals.begin(); _iter1147 != this->part_vals.end(); ++_iter1147) + std::vector ::const_iterator _iter1169; + for (_iter1169 = this->part_vals.begin(); _iter1169 != this->part_vals.end(); ++_iter1169) { - xfer += oprot->writeString((*_iter1147)); + xfer += oprot->writeString((*_iter1169)); } xfer += oprot->writeListEnd(); } @@ -16270,10 +16270,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1148; - for (_iter1148 = (*(this->part_vals)).begin(); _iter1148 != (*(this->part_vals)).end(); ++_iter1148) + std::vector ::const_iterator _iter1170; + for (_iter1170 = (*(this->part_vals)).begin(); _iter1170 != (*(this->part_vals)).end(); ++_iter1170) { - xfer += oprot->writeString((*_iter1148)); + xfer += oprot->writeString((*_iter1170)); } xfer += oprot->writeListEnd(); } @@ -16318,14 +16318,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1149; - ::apache::thrift::protocol::TType _etype1152; - xfer += iprot->readListBegin(_etype1152, _size1149); - this->success.resize(_size1149); - uint32_t _i1153; - for (_i1153 = 0; _i1153 < _size1149; ++_i1153) + uint32_t _size1171; + ::apache::thrift::protocol::TType _etype1174; + xfer += iprot->readListBegin(_etype1174, _size1171); + this->success.resize(_size1171); + uint32_t _i1175; + for (_i1175 = 0; _i1175 < _size1171; ++_i1175) { - xfer += iprot->readString(this->success[_i1153]); + xfer 
+= iprot->readString(this->success[_i1175]); } xfer += iprot->readListEnd(); } @@ -16372,10 +16372,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1154; - for (_iter1154 = this->success.begin(); _iter1154 != this->success.end(); ++_iter1154) + std::vector ::const_iterator _iter1176; + for (_iter1176 = this->success.begin(); _iter1176 != this->success.end(); ++_iter1176) { - xfer += oprot->writeString((*_iter1154)); + xfer += oprot->writeString((*_iter1176)); } xfer += oprot->writeListEnd(); } @@ -16424,14 +16424,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1155; - ::apache::thrift::protocol::TType _etype1158; - xfer += iprot->readListBegin(_etype1158, _size1155); - (*(this->success)).resize(_size1155); - uint32_t _i1159; - for (_i1159 = 0; _i1159 < _size1155; ++_i1159) + uint32_t _size1177; + ::apache::thrift::protocol::TType _etype1180; + xfer += iprot->readListBegin(_etype1180, _size1177); + (*(this->success)).resize(_size1177); + uint32_t _i1181; + for (_i1181 = 0; _i1181 < _size1177; ++_i1181) { - xfer += iprot->readString((*(this->success))[_i1159]); + xfer += iprot->readString((*(this->success))[_i1181]); } xfer += iprot->readListEnd(); } @@ -16625,14 +16625,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1160; - ::apache::thrift::protocol::TType _etype1163; - xfer += iprot->readListBegin(_etype1163, _size1160); - this->success.resize(_size1160); - uint32_t _i1164; - for (_i1164 = 0; _i1164 < _size1160; ++_i1164) + uint32_t _size1182; + 
::apache::thrift::protocol::TType _etype1185; + xfer += iprot->readListBegin(_etype1185, _size1182); + this->success.resize(_size1182); + uint32_t _i1186; + for (_i1186 = 0; _i1186 < _size1182; ++_i1186) { - xfer += this->success[_i1164].read(iprot); + xfer += this->success[_i1186].read(iprot); } xfer += iprot->readListEnd(); } @@ -16679,10 +16679,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1165; - for (_iter1165 = this->success.begin(); _iter1165 != this->success.end(); ++_iter1165) + std::vector ::const_iterator _iter1187; + for (_iter1187 = this->success.begin(); _iter1187 != this->success.end(); ++_iter1187) { - xfer += (*_iter1165).write(oprot); + xfer += (*_iter1187).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16731,14 +16731,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1166; - ::apache::thrift::protocol::TType _etype1169; - xfer += iprot->readListBegin(_etype1169, _size1166); - (*(this->success)).resize(_size1166); - uint32_t _i1170; - for (_i1170 = 0; _i1170 < _size1166; ++_i1170) + uint32_t _size1188; + ::apache::thrift::protocol::TType _etype1191; + xfer += iprot->readListBegin(_etype1191, _size1188); + (*(this->success)).resize(_size1188); + uint32_t _i1192; + for (_i1192 = 0; _i1192 < _size1188; ++_i1192) { - xfer += (*(this->success))[_i1170].read(iprot); + xfer += (*(this->success))[_i1192].read(iprot); } xfer += iprot->readListEnd(); } @@ -16932,14 +16932,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1171; - 
::apache::thrift::protocol::TType _etype1174; - xfer += iprot->readListBegin(_etype1174, _size1171); - this->success.resize(_size1171); - uint32_t _i1175; - for (_i1175 = 0; _i1175 < _size1171; ++_i1175) + uint32_t _size1193; + ::apache::thrift::protocol::TType _etype1196; + xfer += iprot->readListBegin(_etype1196, _size1193); + this->success.resize(_size1193); + uint32_t _i1197; + for (_i1197 = 0; _i1197 < _size1193; ++_i1197) { - xfer += this->success[_i1175].read(iprot); + xfer += this->success[_i1197].read(iprot); } xfer += iprot->readListEnd(); } @@ -16986,10 +16986,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1176; - for (_iter1176 = this->success.begin(); _iter1176 != this->success.end(); ++_iter1176) + std::vector ::const_iterator _iter1198; + for (_iter1198 = this->success.begin(); _iter1198 != this->success.end(); ++_iter1198) { - xfer += (*_iter1176).write(oprot); + xfer += (*_iter1198).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17038,14 +17038,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1177; - ::apache::thrift::protocol::TType _etype1180; - xfer += iprot->readListBegin(_etype1180, _size1177); - (*(this->success)).resize(_size1177); - uint32_t _i1181; - for (_i1181 = 0; _i1181 < _size1177; ++_i1181) + uint32_t _size1199; + ::apache::thrift::protocol::TType _etype1202; + xfer += iprot->readListBegin(_etype1202, _size1199); + (*(this->success)).resize(_size1199); + uint32_t _i1203; + for (_i1203 = 0; _i1203 < _size1199; ++_i1203) { - xfer += (*(this->success))[_i1181].read(iprot); + xfer += (*(this->success))[_i1203].read(iprot); } xfer += 
iprot->readListEnd(); } @@ -17614,14 +17614,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1182; - ::apache::thrift::protocol::TType _etype1185; - xfer += iprot->readListBegin(_etype1185, _size1182); - this->names.resize(_size1182); - uint32_t _i1186; - for (_i1186 = 0; _i1186 < _size1182; ++_i1186) + uint32_t _size1204; + ::apache::thrift::protocol::TType _etype1207; + xfer += iprot->readListBegin(_etype1207, _size1204); + this->names.resize(_size1204); + uint32_t _i1208; + for (_i1208 = 0; _i1208 < _size1204; ++_i1208) { - xfer += iprot->readString(this->names[_i1186]); + xfer += iprot->readString(this->names[_i1208]); } xfer += iprot->readListEnd(); } @@ -17658,10 +17658,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1187; - for (_iter1187 = this->names.begin(); _iter1187 != this->names.end(); ++_iter1187) + std::vector ::const_iterator _iter1209; + for (_iter1209 = this->names.begin(); _iter1209 != this->names.end(); ++_iter1209) { - xfer += oprot->writeString((*_iter1187)); + xfer += oprot->writeString((*_iter1209)); } xfer += oprot->writeListEnd(); } @@ -17693,10 +17693,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1188; - for (_iter1188 = (*(this->names)).begin(); _iter1188 != (*(this->names)).end(); ++_iter1188) + std::vector ::const_iterator _iter1210; + for (_iter1210 = (*(this->names)).begin(); _iter1210 != 
(*(this->names)).end(); ++_iter1210) { - xfer += oprot->writeString((*_iter1188)); + xfer += oprot->writeString((*_iter1210)); } xfer += oprot->writeListEnd(); } @@ -17737,14 +17737,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1189; - ::apache::thrift::protocol::TType _etype1192; - xfer += iprot->readListBegin(_etype1192, _size1189); - this->success.resize(_size1189); - uint32_t _i1193; - for (_i1193 = 0; _i1193 < _size1189; ++_i1193) + uint32_t _size1211; + ::apache::thrift::protocol::TType _etype1214; + xfer += iprot->readListBegin(_etype1214, _size1211); + this->success.resize(_size1211); + uint32_t _i1215; + for (_i1215 = 0; _i1215 < _size1211; ++_i1215) { - xfer += this->success[_i1193].read(iprot); + xfer += this->success[_i1215].read(iprot); } xfer += iprot->readListEnd(); } @@ -17791,10 +17791,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1194; - for (_iter1194 = this->success.begin(); _iter1194 != this->success.end(); ++_iter1194) + std::vector ::const_iterator _iter1216; + for (_iter1216 = this->success.begin(); _iter1216 != this->success.end(); ++_iter1216) { - xfer += (*_iter1194).write(oprot); + xfer += (*_iter1216).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17843,14 +17843,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1195; - ::apache::thrift::protocol::TType _etype1198; - xfer += iprot->readListBegin(_etype1198, _size1195); - (*(this->success)).resize(_size1195); - uint32_t _i1199; - for (_i1199 = 0; _i1199 < 
_size1195; ++_i1199) + uint32_t _size1217; + ::apache::thrift::protocol::TType _etype1220; + xfer += iprot->readListBegin(_etype1220, _size1217); + (*(this->success)).resize(_size1217); + uint32_t _i1221; + for (_i1221 = 0; _i1221 < _size1217; ++_i1221) { - xfer += (*(this->success))[_i1199].read(iprot); + xfer += (*(this->success))[_i1221].read(iprot); } xfer += iprot->readListEnd(); } @@ -18172,14 +18172,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1200; - ::apache::thrift::protocol::TType _etype1203; - xfer += iprot->readListBegin(_etype1203, _size1200); - this->new_parts.resize(_size1200); - uint32_t _i1204; - for (_i1204 = 0; _i1204 < _size1200; ++_i1204) + uint32_t _size1222; + ::apache::thrift::protocol::TType _etype1225; + xfer += iprot->readListBegin(_etype1225, _size1222); + this->new_parts.resize(_size1222); + uint32_t _i1226; + for (_i1226 = 0; _i1226 < _size1222; ++_i1226) { - xfer += this->new_parts[_i1204].read(iprot); + xfer += this->new_parts[_i1226].read(iprot); } xfer += iprot->readListEnd(); } @@ -18216,10 +18216,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1205; - for (_iter1205 = this->new_parts.begin(); _iter1205 != this->new_parts.end(); ++_iter1205) + std::vector ::const_iterator _iter1227; + for (_iter1227 = this->new_parts.begin(); _iter1227 != this->new_parts.end(); ++_iter1227) { - xfer += (*_iter1205).write(oprot); + xfer += (*_iter1227).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18251,10 +18251,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", 
::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1206; - for (_iter1206 = (*(this->new_parts)).begin(); _iter1206 != (*(this->new_parts)).end(); ++_iter1206) + std::vector ::const_iterator _iter1228; + for (_iter1228 = (*(this->new_parts)).begin(); _iter1228 != (*(this->new_parts)).end(); ++_iter1228) { - xfer += (*_iter1206).write(oprot); + xfer += (*_iter1228).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18439,14 +18439,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1207; - ::apache::thrift::protocol::TType _etype1210; - xfer += iprot->readListBegin(_etype1210, _size1207); - this->new_parts.resize(_size1207); - uint32_t _i1211; - for (_i1211 = 0; _i1211 < _size1207; ++_i1211) + uint32_t _size1229; + ::apache::thrift::protocol::TType _etype1232; + xfer += iprot->readListBegin(_etype1232, _size1229); + this->new_parts.resize(_size1229); + uint32_t _i1233; + for (_i1233 = 0; _i1233 < _size1229; ++_i1233) { - xfer += this->new_parts[_i1211].read(iprot); + xfer += this->new_parts[_i1233].read(iprot); } xfer += iprot->readListEnd(); } @@ -18491,10 +18491,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1212; - for (_iter1212 = this->new_parts.begin(); _iter1212 != this->new_parts.end(); ++_iter1212) + std::vector ::const_iterator _iter1234; + for (_iter1234 = this->new_parts.begin(); _iter1234 != this->new_parts.end(); ++_iter1234) { - xfer += (*_iter1212).write(oprot); + xfer += (*_iter1234).write(oprot); } xfer += 
oprot->writeListEnd(); } @@ -18530,10 +18530,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1213; - for (_iter1213 = (*(this->new_parts)).begin(); _iter1213 != (*(this->new_parts)).end(); ++_iter1213) + std::vector ::const_iterator _iter1235; + for (_iter1235 = (*(this->new_parts)).begin(); _iter1235 != (*(this->new_parts)).end(); ++_iter1235) { - xfer += (*_iter1213).write(oprot); + xfer += (*_iter1235).write(oprot); } xfer += oprot->writeListEnd(); } @@ -18977,14 +18977,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1214; - ::apache::thrift::protocol::TType _etype1217; - xfer += iprot->readListBegin(_etype1217, _size1214); - this->part_vals.resize(_size1214); - uint32_t _i1218; - for (_i1218 = 0; _i1218 < _size1214; ++_i1218) + uint32_t _size1236; + ::apache::thrift::protocol::TType _etype1239; + xfer += iprot->readListBegin(_etype1239, _size1236); + this->part_vals.resize(_size1236); + uint32_t _i1240; + for (_i1240 = 0; _i1240 < _size1236; ++_i1240) { - xfer += iprot->readString(this->part_vals[_i1218]); + xfer += iprot->readString(this->part_vals[_i1240]); } xfer += iprot->readListEnd(); } @@ -19029,10 +19029,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1219; - for (_iter1219 = this->part_vals.begin(); _iter1219 != this->part_vals.end(); ++_iter1219) + std::vector ::const_iterator _iter1241; + 
for (_iter1241 = this->part_vals.begin(); _iter1241 != this->part_vals.end(); ++_iter1241) { - xfer += oprot->writeString((*_iter1219)); + xfer += oprot->writeString((*_iter1241)); } xfer += oprot->writeListEnd(); } @@ -19068,10 +19068,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1220; - for (_iter1220 = (*(this->part_vals)).begin(); _iter1220 != (*(this->part_vals)).end(); ++_iter1220) + std::vector ::const_iterator _iter1242; + for (_iter1242 = (*(this->part_vals)).begin(); _iter1242 != (*(this->part_vals)).end(); ++_iter1242) { - xfer += oprot->writeString((*_iter1220)); + xfer += oprot->writeString((*_iter1242)); } xfer += oprot->writeListEnd(); } @@ -19244,14 +19244,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1221; - ::apache::thrift::protocol::TType _etype1224; - xfer += iprot->readListBegin(_etype1224, _size1221); - this->part_vals.resize(_size1221); - uint32_t _i1225; - for (_i1225 = 0; _i1225 < _size1221; ++_i1225) + uint32_t _size1243; + ::apache::thrift::protocol::TType _etype1246; + xfer += iprot->readListBegin(_etype1246, _size1243); + this->part_vals.resize(_size1243); + uint32_t _i1247; + for (_i1247 = 0; _i1247 < _size1243; ++_i1247) { - xfer += iprot->readString(this->part_vals[_i1225]); + xfer += iprot->readString(this->part_vals[_i1247]); } xfer += iprot->readListEnd(); } @@ -19288,10 +19288,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1226; - for (_iter1226 = this->part_vals.begin(); _iter1226 != this->part_vals.end(); ++_iter1226) + std::vector ::const_iterator _iter1248; + for (_iter1248 = this->part_vals.begin(); _iter1248 != this->part_vals.end(); ++_iter1248) { - xfer += oprot->writeString((*_iter1226)); + xfer += oprot->writeString((*_iter1248)); } xfer += oprot->writeListEnd(); } @@ -19319,10 +19319,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1227; - for (_iter1227 = (*(this->part_vals)).begin(); _iter1227 != (*(this->part_vals)).end(); ++_iter1227) + std::vector ::const_iterator _iter1249; + for (_iter1249 = (*(this->part_vals)).begin(); _iter1249 != (*(this->part_vals)).end(); ++_iter1249) { - xfer += oprot->writeString((*_iter1227)); + xfer += oprot->writeString((*_iter1249)); } xfer += oprot->writeListEnd(); } @@ -19797,14 +19797,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1228; - ::apache::thrift::protocol::TType _etype1231; - xfer += iprot->readListBegin(_etype1231, _size1228); - this->success.resize(_size1228); - uint32_t _i1232; - for (_i1232 = 0; _i1232 < _size1228; ++_i1232) + uint32_t _size1250; + ::apache::thrift::protocol::TType _etype1253; + xfer += iprot->readListBegin(_etype1253, _size1250); + this->success.resize(_size1250); + uint32_t _i1254; + for (_i1254 = 0; _i1254 < _size1250; ++_i1254) { - xfer += iprot->readString(this->success[_i1232]); + xfer += iprot->readString(this->success[_i1254]); } xfer += iprot->readListEnd(); } @@ -19843,10 +19843,10 @@ uint32_t 
ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1233; - for (_iter1233 = this->success.begin(); _iter1233 != this->success.end(); ++_iter1233) + std::vector ::const_iterator _iter1255; + for (_iter1255 = this->success.begin(); _iter1255 != this->success.end(); ++_iter1255) { - xfer += oprot->writeString((*_iter1233)); + xfer += oprot->writeString((*_iter1255)); } xfer += oprot->writeListEnd(); } @@ -19891,14 +19891,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1234; - ::apache::thrift::protocol::TType _etype1237; - xfer += iprot->readListBegin(_etype1237, _size1234); - (*(this->success)).resize(_size1234); - uint32_t _i1238; - for (_i1238 = 0; _i1238 < _size1234; ++_i1238) + uint32_t _size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + (*(this->success)).resize(_size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) { - xfer += iprot->readString((*(this->success))[_i1238]); + xfer += iprot->readString((*(this->success))[_i1260]); } xfer += iprot->readListEnd(); } @@ -20036,17 +20036,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1239; - ::apache::thrift::protocol::TType _ktype1240; - ::apache::thrift::protocol::TType _vtype1241; - xfer += iprot->readMapBegin(_ktype1240, _vtype1241, _size1239); - uint32_t _i1243; - for (_i1243 = 0; _i1243 < _size1239; ++_i1243) + uint32_t _size1261; + ::apache::thrift::protocol::TType _ktype1262; + ::apache::thrift::protocol::TType 
_vtype1263; + xfer += iprot->readMapBegin(_ktype1262, _vtype1263, _size1261); + uint32_t _i1265; + for (_i1265 = 0; _i1265 < _size1261; ++_i1265) { - std::string _key1244; - xfer += iprot->readString(_key1244); - std::string& _val1245 = this->success[_key1244]; - xfer += iprot->readString(_val1245); + std::string _key1266; + xfer += iprot->readString(_key1266); + std::string& _val1267 = this->success[_key1266]; + xfer += iprot->readString(_val1267); } xfer += iprot->readMapEnd(); } @@ -20085,11 +20085,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1246; - for (_iter1246 = this->success.begin(); _iter1246 != this->success.end(); ++_iter1246) + std::map ::const_iterator _iter1268; + for (_iter1268 = this->success.begin(); _iter1268 != this->success.end(); ++_iter1268) { - xfer += oprot->writeString(_iter1246->first); - xfer += oprot->writeString(_iter1246->second); + xfer += oprot->writeString(_iter1268->first); + xfer += oprot->writeString(_iter1268->second); } xfer += oprot->writeMapEnd(); } @@ -20134,17 +20134,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _ktype1248; - ::apache::thrift::protocol::TType _vtype1249; - xfer += iprot->readMapBegin(_ktype1248, _vtype1249, _size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1269; + ::apache::thrift::protocol::TType _ktype1270; + ::apache::thrift::protocol::TType _vtype1271; + xfer += iprot->readMapBegin(_ktype1270, _vtype1271, _size1269); + uint32_t _i1273; + for (_i1273 = 0; _i1273 < _size1269; ++_i1273) { - 
std::string _key1252; - xfer += iprot->readString(_key1252); - std::string& _val1253 = (*(this->success))[_key1252]; - xfer += iprot->readString(_val1253); + std::string _key1274; + xfer += iprot->readString(_key1274); + std::string& _val1275 = (*(this->success))[_key1274]; + xfer += iprot->readString(_val1275); } xfer += iprot->readMapEnd(); } @@ -20219,17 +20219,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1254; - ::apache::thrift::protocol::TType _ktype1255; - ::apache::thrift::protocol::TType _vtype1256; - xfer += iprot->readMapBegin(_ktype1255, _vtype1256, _size1254); - uint32_t _i1258; - for (_i1258 = 0; _i1258 < _size1254; ++_i1258) + uint32_t _size1276; + ::apache::thrift::protocol::TType _ktype1277; + ::apache::thrift::protocol::TType _vtype1278; + xfer += iprot->readMapBegin(_ktype1277, _vtype1278, _size1276); + uint32_t _i1280; + for (_i1280 = 0; _i1280 < _size1276; ++_i1280) { - std::string _key1259; - xfer += iprot->readString(_key1259); - std::string& _val1260 = this->part_vals[_key1259]; - xfer += iprot->readString(_val1260); + std::string _key1281; + xfer += iprot->readString(_key1281); + std::string& _val1282 = this->part_vals[_key1281]; + xfer += iprot->readString(_val1282); } xfer += iprot->readMapEnd(); } @@ -20240,9 +20240,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1261; - xfer += iprot->readI32(ecast1261); - this->eventType = (PartitionEventType::type)ecast1261; + int32_t ecast1283; + xfer += iprot->readI32(ecast1283); + this->eventType = (PartitionEventType::type)ecast1283; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20276,11 +20276,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1262; - for (_iter1262 = this->part_vals.begin(); _iter1262 != this->part_vals.end(); ++_iter1262) + std::map ::const_iterator _iter1284; + for (_iter1284 = this->part_vals.begin(); _iter1284 != this->part_vals.end(); ++_iter1284) { - xfer += oprot->writeString(_iter1262->first); - xfer += oprot->writeString(_iter1262->second); + xfer += oprot->writeString(_iter1284->first); + xfer += oprot->writeString(_iter1284->second); } xfer += oprot->writeMapEnd(); } @@ -20316,11 +20316,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1263; - for (_iter1263 = (*(this->part_vals)).begin(); _iter1263 != (*(this->part_vals)).end(); ++_iter1263) + std::map ::const_iterator _iter1285; + for (_iter1285 = (*(this->part_vals)).begin(); _iter1285 != (*(this->part_vals)).end(); ++_iter1285) { - xfer += oprot->writeString(_iter1263->first); - xfer += oprot->writeString(_iter1263->second); + xfer += oprot->writeString(_iter1285->first); + xfer += oprot->writeString(_iter1285->second); } xfer += oprot->writeMapEnd(); } @@ -20589,17 +20589,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1264; - ::apache::thrift::protocol::TType _ktype1265; - ::apache::thrift::protocol::TType _vtype1266; - xfer += iprot->readMapBegin(_ktype1265, _vtype1266, _size1264); - uint32_t _i1268; - for (_i1268 = 0; _i1268 < _size1264; ++_i1268) + uint32_t _size1286; + 
::apache::thrift::protocol::TType _ktype1287; + ::apache::thrift::protocol::TType _vtype1288; + xfer += iprot->readMapBegin(_ktype1287, _vtype1288, _size1286); + uint32_t _i1290; + for (_i1290 = 0; _i1290 < _size1286; ++_i1290) { - std::string _key1269; - xfer += iprot->readString(_key1269); - std::string& _val1270 = this->part_vals[_key1269]; - xfer += iprot->readString(_val1270); + std::string _key1291; + xfer += iprot->readString(_key1291); + std::string& _val1292 = this->part_vals[_key1291]; + xfer += iprot->readString(_val1292); } xfer += iprot->readMapEnd(); } @@ -20610,9 +20610,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1271; - xfer += iprot->readI32(ecast1271); - this->eventType = (PartitionEventType::type)ecast1271; + int32_t ecast1293; + xfer += iprot->readI32(ecast1293); + this->eventType = (PartitionEventType::type)ecast1293; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -20646,11 +20646,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1272; - for (_iter1272 = this->part_vals.begin(); _iter1272 != this->part_vals.end(); ++_iter1272) + std::map ::const_iterator _iter1294; + for (_iter1294 = this->part_vals.begin(); _iter1294 != this->part_vals.end(); ++_iter1294) { - xfer += oprot->writeString(_iter1272->first); - xfer += oprot->writeString(_iter1272->second); + xfer += oprot->writeString(_iter1294->first); + xfer += oprot->writeString(_iter1294->second); } xfer += oprot->writeMapEnd(); } @@ -20686,11 +20686,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += 
oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1273; - for (_iter1273 = (*(this->part_vals)).begin(); _iter1273 != (*(this->part_vals)).end(); ++_iter1273) + std::map ::const_iterator _iter1295; + for (_iter1295 = (*(this->part_vals)).begin(); _iter1295 != (*(this->part_vals)).end(); ++_iter1295) { - xfer += oprot->writeString(_iter1273->first); - xfer += oprot->writeString(_iter1273->second); + xfer += oprot->writeString(_iter1295->first); + xfer += oprot->writeString(_iter1295->second); } xfer += oprot->writeMapEnd(); } @@ -22126,14 +22126,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1274; - ::apache::thrift::protocol::TType _etype1277; - xfer += iprot->readListBegin(_etype1277, _size1274); - this->success.resize(_size1274); - uint32_t _i1278; - for (_i1278 = 0; _i1278 < _size1274; ++_i1278) + uint32_t _size1296; + ::apache::thrift::protocol::TType _etype1299; + xfer += iprot->readListBegin(_etype1299, _size1296); + this->success.resize(_size1296); + uint32_t _i1300; + for (_i1300 = 0; _i1300 < _size1296; ++_i1300) { - xfer += this->success[_i1278].read(iprot); + xfer += this->success[_i1300].read(iprot); } xfer += iprot->readListEnd(); } @@ -22180,10 +22180,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1279; - for (_iter1279 = this->success.begin(); _iter1279 != this->success.end(); ++_iter1279) + std::vector ::const_iterator _iter1301; + for (_iter1301 = 
this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) { - xfer += (*_iter1279).write(oprot); + xfer += (*_iter1301).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22232,14 +22232,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1280; - ::apache::thrift::protocol::TType _etype1283; - xfer += iprot->readListBegin(_etype1283, _size1280); - (*(this->success)).resize(_size1280); - uint32_t _i1284; - for (_i1284 = 0; _i1284 < _size1280; ++_i1284) + uint32_t _size1302; + ::apache::thrift::protocol::TType _etype1305; + xfer += iprot->readListBegin(_etype1305, _size1302); + (*(this->success)).resize(_size1302); + uint32_t _i1306; + for (_i1306 = 0; _i1306 < _size1302; ++_i1306) { - xfer += (*(this->success))[_i1284].read(iprot); + xfer += (*(this->success))[_i1306].read(iprot); } xfer += iprot->readListEnd(); } @@ -22417,14 +22417,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1285; - ::apache::thrift::protocol::TType _etype1288; - xfer += iprot->readListBegin(_etype1288, _size1285); - this->success.resize(_size1285); - uint32_t _i1289; - for (_i1289 = 0; _i1289 < _size1285; ++_i1289) + uint32_t _size1307; + ::apache::thrift::protocol::TType _etype1310; + xfer += iprot->readListBegin(_etype1310, _size1307); + this->success.resize(_size1307); + uint32_t _i1311; + for (_i1311 = 0; _i1311 < _size1307; ++_i1311) { - xfer += iprot->readString(this->success[_i1289]); + xfer += iprot->readString(this->success[_i1311]); } xfer += iprot->readListEnd(); } @@ -22463,10 +22463,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1290; - for (_iter1290 = this->success.begin(); _iter1290 != this->success.end(); ++_iter1290) + std::vector ::const_iterator _iter1312; + for (_iter1312 = this->success.begin(); _iter1312 != this->success.end(); ++_iter1312) { - xfer += oprot->writeString((*_iter1290)); + xfer += oprot->writeString((*_iter1312)); } xfer += oprot->writeListEnd(); } @@ -22511,14 +22511,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1291; - ::apache::thrift::protocol::TType _etype1294; - xfer += iprot->readListBegin(_etype1294, _size1291); - (*(this->success)).resize(_size1291); - uint32_t _i1295; - for (_i1295 = 0; _i1295 < _size1291; ++_i1295) + uint32_t _size1313; + ::apache::thrift::protocol::TType _etype1316; + xfer += iprot->readListBegin(_etype1316, _size1313); + (*(this->success)).resize(_size1313); + uint32_t _i1317; + for (_i1317 = 0; _i1317 < _size1313; ++_i1317) { - xfer += iprot->readString((*(this->success))[_i1295]); + xfer += iprot->readString((*(this->success))[_i1317]); } xfer += iprot->readListEnd(); } @@ -26545,14 +26545,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1296; - ::apache::thrift::protocol::TType _etype1299; - xfer += iprot->readListBegin(_etype1299, _size1296); - this->success.resize(_size1296); - uint32_t _i1300; - for (_i1300 = 0; _i1300 < _size1296; ++_i1300) + uint32_t _size1318; + ::apache::thrift::protocol::TType _etype1321; + xfer += iprot->readListBegin(_etype1321, _size1318); + this->success.resize(_size1318); + uint32_t _i1322; + for (_i1322 = 0; _i1322 < _size1318; ++_i1322) { - xfer += iprot->readString(this->success[_i1300]); + xfer += 
iprot->readString(this->success[_i1322]); } xfer += iprot->readListEnd(); } @@ -26591,10 +26591,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1301; - for (_iter1301 = this->success.begin(); _iter1301 != this->success.end(); ++_iter1301) + std::vector ::const_iterator _iter1323; + for (_iter1323 = this->success.begin(); _iter1323 != this->success.end(); ++_iter1323) { - xfer += oprot->writeString((*_iter1301)); + xfer += oprot->writeString((*_iter1323)); } xfer += oprot->writeListEnd(); } @@ -26639,14 +26639,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1302; - ::apache::thrift::protocol::TType _etype1305; - xfer += iprot->readListBegin(_etype1305, _size1302); - (*(this->success)).resize(_size1302); - uint32_t _i1306; - for (_i1306 = 0; _i1306 < _size1302; ++_i1306) + uint32_t _size1324; + ::apache::thrift::protocol::TType _etype1327; + xfer += iprot->readListBegin(_etype1327, _size1324); + (*(this->success)).resize(_size1324); + uint32_t _i1328; + for (_i1328 = 0; _i1328 < _size1324; ++_i1328) { - xfer += iprot->readString((*(this->success))[_i1306]); + xfer += iprot->readString((*(this->success))[_i1328]); } xfer += iprot->readListEnd(); } @@ -27606,14 +27606,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1307; - ::apache::thrift::protocol::TType _etype1310; - xfer += iprot->readListBegin(_etype1310, _size1307); - this->success.resize(_size1307); - uint32_t _i1311; - for (_i1311 = 0; _i1311 < _size1307; ++_i1311) + uint32_t _size1329; + 
::apache::thrift::protocol::TType _etype1332; + xfer += iprot->readListBegin(_etype1332, _size1329); + this->success.resize(_size1329); + uint32_t _i1333; + for (_i1333 = 0; _i1333 < _size1329; ++_i1333) { - xfer += iprot->readString(this->success[_i1311]); + xfer += iprot->readString(this->success[_i1333]); } xfer += iprot->readListEnd(); } @@ -27652,10 +27652,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1312; - for (_iter1312 = this->success.begin(); _iter1312 != this->success.end(); ++_iter1312) + std::vector ::const_iterator _iter1334; + for (_iter1334 = this->success.begin(); _iter1334 != this->success.end(); ++_iter1334) { - xfer += oprot->writeString((*_iter1312)); + xfer += oprot->writeString((*_iter1334)); } xfer += oprot->writeListEnd(); } @@ -27700,14 +27700,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1313; - ::apache::thrift::protocol::TType _etype1316; - xfer += iprot->readListBegin(_etype1316, _size1313); - (*(this->success)).resize(_size1313); - uint32_t _i1317; - for (_i1317 = 0; _i1317 < _size1313; ++_i1317) + uint32_t _size1335; + ::apache::thrift::protocol::TType _etype1338; + xfer += iprot->readListBegin(_etype1338, _size1335); + (*(this->success)).resize(_size1335); + uint32_t _i1339; + for (_i1339 = 0; _i1339 < _size1335; ++_i1339) { - xfer += iprot->readString((*(this->success))[_i1317]); + xfer += iprot->readString((*(this->success))[_i1339]); } xfer += iprot->readListEnd(); } @@ -27780,9 +27780,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { 
- int32_t ecast1318; - xfer += iprot->readI32(ecast1318); - this->principal_type = (PrincipalType::type)ecast1318; + int32_t ecast1340; + xfer += iprot->readI32(ecast1340); + this->principal_type = (PrincipalType::type)ecast1340; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27798,9 +27798,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1319; - xfer += iprot->readI32(ecast1319); - this->grantorType = (PrincipalType::type)ecast1319; + int32_t ecast1341; + xfer += iprot->readI32(ecast1341); + this->grantorType = (PrincipalType::type)ecast1341; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -28071,9 +28071,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1320; - xfer += iprot->readI32(ecast1320); - this->principal_type = (PrincipalType::type)ecast1320; + int32_t ecast1342; + xfer += iprot->readI32(ecast1342); + this->principal_type = (PrincipalType::type)ecast1342; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28304,9 +28304,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1321; - xfer += iprot->readI32(ecast1321); - this->principal_type = (PrincipalType::type)ecast1321; + int32_t ecast1343; + xfer += iprot->readI32(ecast1343); + this->principal_type = (PrincipalType::type)ecast1343; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -28395,14 +28395,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1322; - ::apache::thrift::protocol::TType _etype1325; - xfer += 
iprot->readListBegin(_etype1325, _size1322); - this->success.resize(_size1322); - uint32_t _i1326; - for (_i1326 = 0; _i1326 < _size1322; ++_i1326) + uint32_t _size1344; + ::apache::thrift::protocol::TType _etype1347; + xfer += iprot->readListBegin(_etype1347, _size1344); + this->success.resize(_size1344); + uint32_t _i1348; + for (_i1348 = 0; _i1348 < _size1344; ++_i1348) { - xfer += this->success[_i1326].read(iprot); + xfer += this->success[_i1348].read(iprot); } xfer += iprot->readListEnd(); } @@ -28441,10 +28441,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1327; - for (_iter1327 = this->success.begin(); _iter1327 != this->success.end(); ++_iter1327) + std::vector ::const_iterator _iter1349; + for (_iter1349 = this->success.begin(); _iter1349 != this->success.end(); ++_iter1349) { - xfer += (*_iter1327).write(oprot); + xfer += (*_iter1349).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28489,14 +28489,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1328; - ::apache::thrift::protocol::TType _etype1331; - xfer += iprot->readListBegin(_etype1331, _size1328); - (*(this->success)).resize(_size1328); - uint32_t _i1332; - for (_i1332 = 0; _i1332 < _size1328; ++_i1332) + uint32_t _size1350; + ::apache::thrift::protocol::TType _etype1353; + xfer += iprot->readListBegin(_etype1353, _size1350); + (*(this->success)).resize(_size1350); + uint32_t _i1354; + for (_i1354 = 0; _i1354 < _size1350; ++_i1354) { - xfer += (*(this->success))[_i1332].read(iprot); + xfer += (*(this->success))[_i1354].read(iprot); } xfer += iprot->readListEnd(); } @@ -29192,14 +29192,14 @@ uint32_t 
ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1333; - ::apache::thrift::protocol::TType _etype1336; - xfer += iprot->readListBegin(_etype1336, _size1333); - this->group_names.resize(_size1333); - uint32_t _i1337; - for (_i1337 = 0; _i1337 < _size1333; ++_i1337) + uint32_t _size1355; + ::apache::thrift::protocol::TType _etype1358; + xfer += iprot->readListBegin(_etype1358, _size1355); + this->group_names.resize(_size1355); + uint32_t _i1359; + for (_i1359 = 0; _i1359 < _size1355; ++_i1359) { - xfer += iprot->readString(this->group_names[_i1337]); + xfer += iprot->readString(this->group_names[_i1359]); } xfer += iprot->readListEnd(); } @@ -29236,10 +29236,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1338; - for (_iter1338 = this->group_names.begin(); _iter1338 != this->group_names.end(); ++_iter1338) + std::vector ::const_iterator _iter1360; + for (_iter1360 = this->group_names.begin(); _iter1360 != this->group_names.end(); ++_iter1360) { - xfer += oprot->writeString((*_iter1338)); + xfer += oprot->writeString((*_iter1360)); } xfer += oprot->writeListEnd(); } @@ -29271,10 +29271,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1339; - for (_iter1339 = (*(this->group_names)).begin(); _iter1339 != (*(this->group_names)).end(); ++_iter1339) + std::vector ::const_iterator _iter1361; + for (_iter1361 = 
(*(this->group_names)).begin(); _iter1361 != (*(this->group_names)).end(); ++_iter1361) { - xfer += oprot->writeString((*_iter1339)); + xfer += oprot->writeString((*_iter1361)); } xfer += oprot->writeListEnd(); } @@ -29449,9 +29449,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1340; - xfer += iprot->readI32(ecast1340); - this->principal_type = (PrincipalType::type)ecast1340; + int32_t ecast1362; + xfer += iprot->readI32(ecast1362); + this->principal_type = (PrincipalType::type)ecast1362; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -29556,14 +29556,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1341; - ::apache::thrift::protocol::TType _etype1344; - xfer += iprot->readListBegin(_etype1344, _size1341); - this->success.resize(_size1341); - uint32_t _i1345; - for (_i1345 = 0; _i1345 < _size1341; ++_i1345) + uint32_t _size1363; + ::apache::thrift::protocol::TType _etype1366; + xfer += iprot->readListBegin(_etype1366, _size1363); + this->success.resize(_size1363); + uint32_t _i1367; + for (_i1367 = 0; _i1367 < _size1363; ++_i1367) { - xfer += this->success[_i1345].read(iprot); + xfer += this->success[_i1367].read(iprot); } xfer += iprot->readListEnd(); } @@ -29602,10 +29602,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1346; - for (_iter1346 = this->success.begin(); _iter1346 != this->success.end(); ++_iter1346) + std::vector ::const_iterator _iter1368; + for (_iter1368 = this->success.begin(); _iter1368 != this->success.end(); 
++_iter1368) { - xfer += (*_iter1346).write(oprot); + xfer += (*_iter1368).write(oprot); } xfer += oprot->writeListEnd(); } @@ -29650,14 +29650,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1347; - ::apache::thrift::protocol::TType _etype1350; - xfer += iprot->readListBegin(_etype1350, _size1347); - (*(this->success)).resize(_size1347); - uint32_t _i1351; - for (_i1351 = 0; _i1351 < _size1347; ++_i1351) + uint32_t _size1369; + ::apache::thrift::protocol::TType _etype1372; + xfer += iprot->readListBegin(_etype1372, _size1369); + (*(this->success)).resize(_size1369); + uint32_t _i1373; + for (_i1373 = 0; _i1373 < _size1369; ++_i1373) { - xfer += (*(this->success))[_i1351].read(iprot); + xfer += (*(this->success))[_i1373].read(iprot); } xfer += iprot->readListEnd(); } @@ -30345,14 +30345,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1352; - ::apache::thrift::protocol::TType _etype1355; - xfer += iprot->readListBegin(_etype1355, _size1352); - this->group_names.resize(_size1352); - uint32_t _i1356; - for (_i1356 = 0; _i1356 < _size1352; ++_i1356) + uint32_t _size1374; + ::apache::thrift::protocol::TType _etype1377; + xfer += iprot->readListBegin(_etype1377, _size1374); + this->group_names.resize(_size1374); + uint32_t _i1378; + for (_i1378 = 0; _i1378 < _size1374; ++_i1378) { - xfer += iprot->readString(this->group_names[_i1356]); + xfer += iprot->readString(this->group_names[_i1378]); } xfer += iprot->readListEnd(); } @@ -30385,10 +30385,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1357; - for (_iter1357 = this->group_names.begin(); _iter1357 != this->group_names.end(); ++_iter1357) + std::vector ::const_iterator _iter1379; + for (_iter1379 = this->group_names.begin(); _iter1379 != this->group_names.end(); ++_iter1379) { - xfer += oprot->writeString((*_iter1357)); + xfer += oprot->writeString((*_iter1379)); } xfer += oprot->writeListEnd(); } @@ -30416,10 +30416,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1358; - for (_iter1358 = (*(this->group_names)).begin(); _iter1358 != (*(this->group_names)).end(); ++_iter1358) + std::vector ::const_iterator _iter1380; + for (_iter1380 = (*(this->group_names)).begin(); _iter1380 != (*(this->group_names)).end(); ++_iter1380) { - xfer += oprot->writeString((*_iter1358)); + xfer += oprot->writeString((*_iter1380)); } xfer += oprot->writeListEnd(); } @@ -30460,14 +30460,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1359; - ::apache::thrift::protocol::TType _etype1362; - xfer += iprot->readListBegin(_etype1362, _size1359); - this->success.resize(_size1359); - uint32_t _i1363; - for (_i1363 = 0; _i1363 < _size1359; ++_i1363) + uint32_t _size1381; + ::apache::thrift::protocol::TType _etype1384; + xfer += iprot->readListBegin(_etype1384, _size1381); + this->success.resize(_size1381); + uint32_t _i1385; + for (_i1385 = 0; _i1385 < _size1381; ++_i1385) { - xfer += iprot->readString(this->success[_i1363]); + xfer += iprot->readString(this->success[_i1385]); } xfer += iprot->readListEnd(); } @@ -30506,10 +30506,10 @@ uint32_t 
ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1364; - for (_iter1364 = this->success.begin(); _iter1364 != this->success.end(); ++_iter1364) + std::vector ::const_iterator _iter1386; + for (_iter1386 = this->success.begin(); _iter1386 != this->success.end(); ++_iter1386) { - xfer += oprot->writeString((*_iter1364)); + xfer += oprot->writeString((*_iter1386)); } xfer += oprot->writeListEnd(); } @@ -30554,14 +30554,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1365; - ::apache::thrift::protocol::TType _etype1368; - xfer += iprot->readListBegin(_etype1368, _size1365); - (*(this->success)).resize(_size1365); - uint32_t _i1369; - for (_i1369 = 0; _i1369 < _size1365; ++_i1369) + uint32_t _size1387; + ::apache::thrift::protocol::TType _etype1390; + xfer += iprot->readListBegin(_etype1390, _size1387); + (*(this->success)).resize(_size1387); + uint32_t _i1391; + for (_i1391 = 0; _i1391 < _size1387; ++_i1391) { - xfer += iprot->readString((*(this->success))[_i1369]); + xfer += iprot->readString((*(this->success))[_i1391]); } xfer += iprot->readListEnd(); } @@ -31872,14 +31872,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1370; - ::apache::thrift::protocol::TType _etype1373; - xfer += iprot->readListBegin(_etype1373, _size1370); - this->success.resize(_size1370); - uint32_t _i1374; - for (_i1374 = 0; _i1374 < _size1370; ++_i1374) + uint32_t _size1392; + ::apache::thrift::protocol::TType _etype1395; + xfer += iprot->readListBegin(_etype1395, _size1392); + 
this->success.resize(_size1392); + uint32_t _i1396; + for (_i1396 = 0; _i1396 < _size1392; ++_i1396) { - xfer += iprot->readString(this->success[_i1374]); + xfer += iprot->readString(this->success[_i1396]); } xfer += iprot->readListEnd(); } @@ -31910,10 +31910,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1375; - for (_iter1375 = this->success.begin(); _iter1375 != this->success.end(); ++_iter1375) + std::vector ::const_iterator _iter1397; + for (_iter1397 = this->success.begin(); _iter1397 != this->success.end(); ++_iter1397) { - xfer += oprot->writeString((*_iter1375)); + xfer += oprot->writeString((*_iter1397)); } xfer += oprot->writeListEnd(); } @@ -31954,14 +31954,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1376; - ::apache::thrift::protocol::TType _etype1379; - xfer += iprot->readListBegin(_etype1379, _size1376); - (*(this->success)).resize(_size1376); - uint32_t _i1380; - for (_i1380 = 0; _i1380 < _size1376; ++_i1380) + uint32_t _size1398; + ::apache::thrift::protocol::TType _etype1401; + xfer += iprot->readListBegin(_etype1401, _size1398); + (*(this->success)).resize(_size1398); + uint32_t _i1402; + for (_i1402 = 0; _i1402 < _size1398; ++_i1402) { - xfer += iprot->readString((*(this->success))[_i1380]); + xfer += iprot->readString((*(this->success))[_i1402]); } xfer += iprot->readListEnd(); } @@ -32687,14 +32687,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1381; - ::apache::thrift::protocol::TType _etype1384; - xfer += 
iprot->readListBegin(_etype1384, _size1381); - this->success.resize(_size1381); - uint32_t _i1385; - for (_i1385 = 0; _i1385 < _size1381; ++_i1385) + uint32_t _size1403; + ::apache::thrift::protocol::TType _etype1406; + xfer += iprot->readListBegin(_etype1406, _size1403); + this->success.resize(_size1403); + uint32_t _i1407; + for (_i1407 = 0; _i1407 < _size1403; ++_i1407) { - xfer += iprot->readString(this->success[_i1385]); + xfer += iprot->readString(this->success[_i1407]); } xfer += iprot->readListEnd(); } @@ -32725,10 +32725,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1386; - for (_iter1386 = this->success.begin(); _iter1386 != this->success.end(); ++_iter1386) + std::vector ::const_iterator _iter1408; + for (_iter1408 = this->success.begin(); _iter1408 != this->success.end(); ++_iter1408) { - xfer += oprot->writeString((*_iter1386)); + xfer += oprot->writeString((*_iter1408)); } xfer += oprot->writeListEnd(); } @@ -32769,14 +32769,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1387; - ::apache::thrift::protocol::TType _etype1390; - xfer += iprot->readListBegin(_etype1390, _size1387); - (*(this->success)).resize(_size1387); - uint32_t _i1391; - for (_i1391 = 0; _i1391 < _size1387; ++_i1391) + uint32_t _size1409; + ::apache::thrift::protocol::TType _etype1412; + xfer += iprot->readListBegin(_etype1412, _size1409); + (*(this->success)).resize(_size1409); + uint32_t _i1413; + for (_i1413 = 0; _i1413 < _size1409; ++_i1413) { - xfer += iprot->readString((*(this->success))[_i1391]); + xfer += iprot->readString((*(this->success))[_i1413]); } xfer += iprot->readListEnd(); 
} @@ -37339,387 +37339,1135 @@ uint32_t ThriftHiveMetastore_cache_file_metadata_presult::read(::apache::thrift: return xfer; } -void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) -{ - send_getMetaConf(key); - recv_getMetaConf(_return); + +ThriftHiveMetastore_get_next_write_id_args::~ThriftHiveMetastore_get_next_write_id_args() throw() { } -void ThriftHiveMetastoreClient::send_getMetaConf(const std::string& key) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_getMetaConf_pargs args; - args.key = &key; - args.write(oprot_); +uint32_t ThriftHiveMetastore_get_next_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; -void ThriftHiveMetastoreClient::recv_getMetaConf(std::string& _return) -{ + xfer += iprot->readStructBegin(fname); - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + using ::apache::thrift::protocol::TProtocolException; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("getMetaConf") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_getMetaConf_presult result; - result.success = &_return; - result.read(iprot_); - 
iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); -} -void ThriftHiveMetastoreClient::setMetaConf(const std::string& key, const std::string& value) -{ - send_setMetaConf(key, value); - recv_setMetaConf(); + xfer += iprot->readStructEnd(); + + return xfer; } -void ThriftHiveMetastoreClient::send_setMetaConf(const std::string& key, const std::string& value) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_get_next_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_args"); - ThriftHiveMetastore_setMetaConf_pargs args; - args.key = &key; - args.value = &value; - args.write(oprot_); + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::recv_setMetaConf() -{ - int32_t rseqid = 0; - 
std::string fname; - ::apache::thrift::protocol::TMessageType mtype; +ThriftHiveMetastore_get_next_write_id_pargs::~ThriftHiveMetastore_get_next_write_id_pargs() throw() { +} - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("setMetaConf") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_setMetaConf_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - return; -} +uint32_t ThriftHiveMetastore_get_next_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_pargs"); -void ThriftHiveMetastoreClient::create_database(const Database& database) -{ - send_create_database(database); - recv_create_database(); -} + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); -void ThriftHiveMetastoreClient::send_create_database(const Database& database) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} - ThriftHiveMetastore_create_database_pargs args; - args.database = &database; - args.write(oprot_); - oprot_->writeMessageEnd(); - 
oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_get_next_write_id_result::~ThriftHiveMetastore_get_next_write_id_result() throw() { } -void ThriftHiveMetastoreClient::recv_create_database() -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_get_next_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("create_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_create_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; - } - return; -} + using ::apache::thrift::protocol::TProtocolException; -void ThriftHiveMetastoreClient::get_database(Database& _return, const std::string& name) -{ - send_get_database(name); - recv_get_database(_return); -} -void ThriftHiveMetastoreClient::send_get_database(const std::string& name) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); 
+ if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } - ThriftHiveMetastore_get_database_pargs args; - args.name = &name; - args.write(oprot_); + xfer += iprot->readStructEnd(); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); + return xfer; } -void ThriftHiveMetastoreClient::recv_get_database(Database& _return) -{ +uint32_t ThriftHiveMetastore_get_next_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + uint32_t xfer = 0; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_database_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_next_write_id_result"); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; + if (this->__isset.success) { + xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; } -void ThriftHiveMetastoreClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - send_drop_database(name, deleteData, cascade); - recv_drop_database(); + +ThriftHiveMetastore_get_next_write_id_presult::~ThriftHiveMetastore_get_next_write_id_presult() throw() { } -void ThriftHiveMetastoreClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_database_pargs args; - args.name = &name; - args.deleteData = &deleteData; - args.cascade = &cascade; - args.write(oprot_); +uint32_t ThriftHiveMetastore_get_next_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); -} + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; -void ThriftHiveMetastoreClient::recv_drop_database() -{ + xfer += iprot->readStructBegin(fname); - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + using ::apache::thrift::protocol::TProtocolException; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != 
::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("drop_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_drop_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - throw result.o1; - } - if (result.__isset.o2) { - throw result.o2; - } - if (result.__isset.o3) { - throw result.o3; + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - return; -} -void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) -{ - send_get_databases(pattern); - recv_get_databases(_return); -} + xfer += iprot->readStructEnd(); -void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + return xfer; +} - ThriftHiveMetastore_get_databases_pargs args; - args.pattern = &pattern; - args.write(oprot_); - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); +ThriftHiveMetastore_finalize_write_id_args::~ThriftHiveMetastore_finalize_write_id_args() throw() { } -void ThriftHiveMetastoreClient::recv_get_databases(std::vector & _return) -{ - int32_t rseqid = 0; +uint32_t ThriftHiveMetastore_finalize_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; std::string fname; - ::apache::thrift::protocol::TMessageType mtype; + ::apache::thrift::protocol::TType ftype; + int16_t fid; - iprot_->readMessageBegin(fname, mtype, rseqid); - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_databases") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - ThriftHiveMetastore_get_databases_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); + xfer += iprot->readStructBegin(fname); - if (result.__isset.success) { - // _return pointer has now been filled - return; - } - if (result.__isset.o1) { - throw result.o1; + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); -} -void ThriftHiveMetastoreClient::get_all_databases(std::vector & _return) -{ - send_get_all_databases(); - recv_get_all_databases(_return); + xfer += iprot->readStructEnd(); + + return xfer; } -void 
ThriftHiveMetastoreClient::send_get_all_databases() -{ - int32_t cseqid = 0; - oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); +uint32_t ThriftHiveMetastore_finalize_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_args"); - ThriftHiveMetastore_get_all_databases_pargs args; + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_pargs::~ThriftHiveMetastore_finalize_write_id_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_result::~ThriftHiveMetastore_finalize_write_id_result() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == 
::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_finalize_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_finalize_write_id_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_finalize_write_id_presult::~ThriftHiveMetastore_finalize_write_id_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_finalize_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + 
+ThriftHiveMetastore_heartbeat_write_id_args::~ThriftHiveMetastore_heartbeat_write_id_args() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_write_id_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_pargs::~ThriftHiveMetastore_heartbeat_write_id_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + 
xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_result::~ThriftHiveMetastore_heartbeat_write_id_result() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_heartbeat_write_id_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_heartbeat_write_id_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_heartbeat_write_id_presult::~ThriftHiveMetastore_heartbeat_write_id_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_heartbeat_write_id_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + 
std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_args::~ThriftHiveMetastore_get_valid_write_ids_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_valid_write_ids_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_args"); + + xfer += oprot->writeFieldBegin("req", 
::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_pargs::~ThriftHiveMetastore_get_valid_write_ids_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_result::~ThriftHiveMetastore_get_valid_write_ids_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_valid_write_ids_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + 
xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_valid_write_ids_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_valid_write_ids_presult::~ThriftHiveMetastore_get_valid_write_ids_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_valid_write_ids_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +void ThriftHiveMetastoreClient::getMetaConf(std::string& _return, const std::string& key) +{ + send_getMetaConf(key); + recv_getMetaConf(_return); +} + +void ThriftHiveMetastoreClient::send_getMetaConf(const std::string& key) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_getMetaConf_pargs args; + args.key = &key; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_getMetaConf(std::string& _return) +{ + + int32_t 
rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_getMetaConf_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); +} + +void ThriftHiveMetastoreClient::setMetaConf(const std::string& key, const std::string& value) +{ + send_setMetaConf(key, value); + recv_setMetaConf(); +} + +void ThriftHiveMetastoreClient::send_setMetaConf(const std::string& key, const std::string& value) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_setMetaConf_pargs args; + args.key = &key; + args.value = &value; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_setMetaConf() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + 
::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("setMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_setMetaConf_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + return; +} + +void ThriftHiveMetastoreClient::create_database(const Database& database) +{ + send_create_database(database); + recv_create_database(); +} + +void ThriftHiveMetastoreClient::send_create_database(const Database& database) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_create_database_pargs args; + args.database = &database; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_create_database() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + 
ThriftHiveMetastore_create_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::get_database(Database& _return, const std::string& name) +{ + send_get_database(name); + recv_get_database(_return); +} + +void ThriftHiveMetastoreClient::send_get_database(const std::string& name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_database_pargs args; + args.name = &name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_database(Database& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); +} + +void ThriftHiveMetastoreClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + send_drop_database(name, deleteData, cascade); + recv_drop_database(); +} + +void ThriftHiveMetastoreClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_drop_database() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_drop_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + return; +} + +void ThriftHiveMetastoreClient::get_databases(std::vector & _return, const std::string& pattern) +{ + 
send_get_databases(pattern); + recv_get_databases(_return); +} + +void ThriftHiveMetastoreClient::send_get_databases(const std::string& pattern) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_databases(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_databases") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_all_databases(std::vector & _return) +{ + send_get_all_databases(); + recv_get_all_databases(_return); +} + +void ThriftHiveMetastoreClient::send_get_all_databases() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_all_databases", 
::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_all_databases_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -46846,6 +47594,238 @@ void ThriftHiveMetastoreClient::recv_cache_file_metadata(CacheFileMetadataResult throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } +void ThriftHiveMetastoreClient::get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) +{ + send_get_next_write_id(req); + recv_get_next_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_get_next_write_id(const GetNextWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_next_write_id_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_next_write_id(GetNextWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_next_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_next_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) 
{ + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_write_id failed: unknown result"); +} + +void ThriftHiveMetastoreClient::finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) +{ + send_finalize_write_id(req); + recv_finalize_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_finalize_write_id(const FinalizeWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_finalize_write_id_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_finalize_write_id(FinalizeWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("finalize_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_finalize_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "finalize_write_id failed: 
unknown result"); +} + +void ThriftHiveMetastoreClient::heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) +{ + send_heartbeat_write_id(req); + recv_heartbeat_write_id(_return); +} + +void ThriftHiveMetastoreClient::send_heartbeat_write_id(const HeartbeatWriteIdRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_heartbeat_write_id_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_heartbeat_write_id(HeartbeatWriteIdResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("heartbeat_write_id") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_heartbeat_write_id_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_write_id failed: unknown result"); +} + +void ThriftHiveMetastoreClient::get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) +{ + 
send_get_valid_write_ids(req); + recv_get_valid_write_ids(_return); +} + +void ThriftHiveMetastoreClient::send_get_valid_write_ids(const GetValidWriteIdsRequest& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_valid_write_ids_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_valid_write_ids(GetValidWriteIdsResult& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_valid_write_ids") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_valid_write_ids_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_valid_write_ids failed: unknown result"); +} + bool ThriftHiveMetastoreProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { ProcessMap::iterator pfn; pfn = processMap_.find(fname); @@ -54973,46 +55953,481 @@ void 
ThriftHiveMetastoreProcessor::process_show_locks(int32_t seqid, ::apache::t } } -void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat"); + } + + ThriftHiveMetastore_heartbeat_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat", bytes); + } + + ThriftHiveMetastore_heartbeat_result result; + try { + iface_->heartbeat(args.ids); + } catch (NoSuchLockException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchTxnException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (TxnAbortedException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat"); + } 
+ + oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_txn_range", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + } + + ThriftHiveMetastore_heartbeat_txn_range_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + } + + ThriftHiveMetastore_heartbeat_txn_range_result result; + try { + iface_->heartbeat_txn_range(result.success, args.txns); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + } + 
+ oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact"); + } + + ThriftHiveMetastore_compact_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact", bytes); + } + + ThriftHiveMetastore_compact_result result; + try { + iface_->compact(args.rqst); + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact"); + } + + oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = 
oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.show_compact", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_compact"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_compact"); + } + + ThriftHiveMetastore_show_compact_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_compact", bytes); + } + + ThriftHiveMetastore_show_compact_result result; + try { + iface_->show_compact(result.success, args.rqst); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_compact"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_compact"); + } + + oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if 
(this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_compact", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_dynamic_partitions", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + } + + ThriftHiveMetastore_add_dynamic_partitions_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + } + + ThriftHiveMetastore_add_dynamic_partitions_result result; + try { + iface_->add_dynamic_partitions(args.rqst); + } catch (NoSuchTxnException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (TxnAbortedException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + } + + oprot->writeMessageBegin("add_dynamic_partitions", 
::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_notification", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_notification"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_notification"); + } + + ThriftHiveMetastore_get_next_notification_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + } + + ThriftHiveMetastore_get_next_notification_result result; + try { + iface_->get_next_notification(result.success, args.rqst); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_notification"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_notification"); + } + + 
oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_current_notificationEventId", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + } + + ThriftHiveMetastore_get_current_notificationEventId_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + } + + ThriftHiveMetastore_get_current_notificationEventId_result result; + try { + iface_->get_current_notificationEventId(result.success); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + 
+ if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + } + + oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.fire_listener_event", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.fire_listener_event"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.fire_listener_event"); + } + + ThriftHiveMetastore_fire_listener_event_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + } + + ThriftHiveMetastore_fire_listener_event_result result; + try { + iface_->fire_listener_event(result.success, args.rqst); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.fire_listener_event"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + 
oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.fire_listener_event"); + } + + oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache"); } - ThriftHiveMetastore_heartbeat_args args; + ThriftHiveMetastore_flushCache_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes); } - ThriftHiveMetastore_heartbeat_result result; + ThriftHiveMetastore_flushCache_result result; try { - iface_->heartbeat(args.ids); - } catch (NoSuchLockException &o1) { - result.o1 = o1; - 
result.__isset.o1 = true; - } catch (NoSuchTxnException &o2) { - result.o2 = o2; - result.__isset.o2 = true; - } catch (TxnAbortedException &o3) { - result.o3 = o3; - result.__isset.o3 = true; + iface_->flushCache(); } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55021,52 +56436,52 @@ void ThriftHiveMetastoreProcessor::process_heartbeat(int32_t seqid, ::apache::th } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache"); } - oprot->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes); } } -void ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - 
ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_txn_range", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata_by_expr", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); } - ThriftHiveMetastore_heartbeat_txn_range_args args; + ThriftHiveMetastore_get_file_metadata_by_expr_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); } - ThriftHiveMetastore_heartbeat_txn_range_result result; + ThriftHiveMetastore_get_file_metadata_by_expr_result result; try { - iface_->heartbeat_txn_range(result.success, args.txns); + iface_->get_file_metadata_by_expr(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55075,51 +56490,52 @@ void 
ThriftHiveMetastoreProcessor::process_heartbeat_txn_range(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); } - oprot->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_txn_range", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); } } -void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.compact", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.compact"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); } - ThriftHiveMetastore_compact_args args; + ThriftHiveMetastore_get_file_metadata_args args; args.read(iprot); 
iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.compact", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); } - ThriftHiveMetastore_compact_result result; + ThriftHiveMetastore_get_file_metadata_result result; try { - iface_->compact(args.rqst); + iface_->get_file_metadata(result.success, args.req); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55128,52 +56544,52 @@ void ThriftHiveMetastoreProcessor::process_compact(int32_t seqid, ::apache::thri } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.compact"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); } - oprot->writeMessageBegin("compact", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.compact", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); } } -void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.show_compact", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.show_compact"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); } - ThriftHiveMetastore_show_compact_args args; + ThriftHiveMetastore_put_file_metadata_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.show_compact", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); } - ThriftHiveMetastore_show_compact_result result; + ThriftHiveMetastore_put_file_metadata_result result; try { - iface_->show_compact(result.success, args.rqst); + iface_->put_file_metadata(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_EXCEPTION, seqid); + 
oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55182,57 +56598,52 @@ void ThriftHiveMetastoreProcessor::process_show_compact(int32_t seqid, ::apache: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.show_compact"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); } - oprot->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.show_compact", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); } } -void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.add_dynamic_partitions", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, 
"ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); } - ThriftHiveMetastore_add_dynamic_partitions_args args; + ThriftHiveMetastore_clear_file_metadata_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); } - ThriftHiveMetastore_add_dynamic_partitions_result result; + ThriftHiveMetastore_clear_file_metadata_result result; try { - iface_->add_dynamic_partitions(args.rqst); - } catch (NoSuchTxnException &o1) { - result.o1 = o1; - result.__isset.o1 = true; - } catch (TxnAbortedException &o2) { - result.o2 = o2; - result.__isset.o2 = true; + iface_->clear_file_metadata(result.success, args.req); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55241,52 +56652,52 @@ void ThriftHiveMetastoreProcessor::process_add_dynamic_partitions(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); } - oprot->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_REPLY, seqid); + 
oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.add_dynamic_partitions", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_notification", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_notification"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); } - ThriftHiveMetastore_get_next_notification_args args; + ThriftHiveMetastore_cache_file_metadata_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); } - 
ThriftHiveMetastore_get_next_notification_result result; + ThriftHiveMetastore_cache_file_metadata_result result; try { - iface_->get_next_notification(result.success, args.rqst); + iface_->cache_file_metadata(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55295,52 +56706,52 @@ void ThriftHiveMetastoreProcessor::process_get_next_notification(int32_t seqid, } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_notification"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); } - oprot->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_notification", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_next_write_id(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_current_notificationEventId", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_next_write_id", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_next_write_id"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_next_write_id"); } - ThriftHiveMetastore_get_current_notificationEventId_args args; + ThriftHiveMetastore_get_next_write_id_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_next_write_id", bytes); } - ThriftHiveMetastore_get_current_notificationEventId_result result; + ThriftHiveMetastore_get_next_write_id_result result; try { - iface_->get_current_notificationEventId(result.success); + iface_->get_next_write_id(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_next_write_id"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_EXCEPTION, seqid); + 
oprot->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55349,52 +56760,52 @@ void ThriftHiveMetastoreProcessor::process_get_current_notificationEventId(int32 } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_next_write_id"); } - oprot->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_current_notificationEventId", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_next_write_id", bytes); } } -void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.fire_listener_event", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.finalize_write_id", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.fire_listener_event"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.finalize_write_id"); if (this->eventHandler_.get() != NULL) { - 
this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.finalize_write_id"); } - ThriftHiveMetastore_fire_listener_event_args args; + ThriftHiveMetastore_finalize_write_id_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.finalize_write_id", bytes); } - ThriftHiveMetastore_fire_listener_event_result result; + ThriftHiveMetastore_finalize_write_id_result result; try { - iface_->fire_listener_event(result.success, args.rqst); + iface_->finalize_write_id(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.finalize_write_id"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55403,51 +56814,52 @@ void ThriftHiveMetastoreProcessor::process_fire_listener_event(int32_t seqid, :: } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.fire_listener_event"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.finalize_write_id"); } - oprot->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); 
oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.fire_listener_event", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.finalize_write_id", bytes); } } -void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.flushCache", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.heartbeat_write_id", callContext); } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.flushCache"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.heartbeat_write_id"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.heartbeat_write_id"); } - ThriftHiveMetastore_flushCache_args args; + ThriftHiveMetastore_heartbeat_write_id_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.flushCache", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.heartbeat_write_id", bytes); } - ThriftHiveMetastore_flushCache_result result; + ThriftHiveMetastore_heartbeat_write_id_result result; try { - iface_->flushCache(); + iface_->heartbeat_write_id(result.success, args.req); + result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) 
{ - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.heartbeat_write_id"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55456,52 +56868,52 @@ void ThriftHiveMetastoreProcessor::process_flushCache(int32_t seqid, ::apache::t } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.flushCache"); + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.heartbeat_write_id"); } - oprot->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.flushCache", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.heartbeat_write_id", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreProcessor::process_get_valid_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata_by_expr", callContext); + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_valid_write_ids", callContext); } - ::apache::thrift::TProcessorContextFreer 
freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_valid_write_ids"); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_valid_write_ids"); } - ThriftHiveMetastore_get_file_metadata_by_expr_args args; + ThriftHiveMetastore_get_valid_write_ids_args args; args.read(iprot); iprot->readMessageEnd(); uint32_t bytes = iprot->getTransport()->readEnd(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_valid_write_ids", bytes); } - ThriftHiveMetastore_get_file_metadata_by_expr_result result; + ThriftHiveMetastore_get_valid_write_ids_result result; try { - iface_->get_file_metadata_by_expr(result.success, args.req); + iface_->get_valid_write_ids(result.success, args.req); result.__isset.success = true; } catch (const std::exception& e) { if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_valid_write_ids"); } ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_EXCEPTION, seqid); + oprot->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_EXCEPTION, seqid); x.write(oprot); oprot->writeMessageEnd(); oprot->getTransport()->writeEnd(); @@ -55510,257 +56922,573 @@ void ThriftHiveMetastoreProcessor::process_get_file_metadata_by_expr(int32_t seq } if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr"); + this->eventHandler_->preWrite(ctx, 
"ThriftHiveMetastore.get_valid_write_ids"); } - oprot->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_REPLY, seqid); + oprot->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_REPLY, seqid); result.write(oprot); oprot->writeMessageEnd(); bytes = oprot->getTransport()->writeEnd(); oprot->getTransport()->flush(); if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata_by_expr", bytes); + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_valid_write_ids", bytes); } } -void ThriftHiveMetastoreProcessor::process_get_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { + ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); + ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); + ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler)); + return processor; +} + +void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_file_metadata"); + int32_t seqid = send_getMetaConf(key); + recv_getMetaConf(_return, seqid); +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_file_metadata"); - } +int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key) +{ + int32_t cseqid = this->sync_.generateSeqId(); + 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + ThriftHiveMetastore_getMetaConf_pargs args; + args.key = &key; + args.write(oprot_); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); - } + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - ThriftHiveMetastore_get_file_metadata_result result; - try { - iface_->get_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_file_metadata"); + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getMetaConf") != 0) { + 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_getMetaConf_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value) +{ + int32_t seqid = send_setMetaConf(key, value); + recv_setMetaConf(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_setMetaConf_pargs args; + args.key = &key; + args.value = &value; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets 
dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("setMetaConf") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_setMetaConf_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) +{ + int32_t seqid = send_create_database(database); + recv_create_database(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + + 
ThriftHiveMetastore_create_database_pargs args; + args.database = &database; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("create_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_create_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily 
unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name) +{ + int32_t seqid = send_get_database(name); + recv_get_database(_return, seqid); +} - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } +int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_file_metadata"); - } + ThriftHiveMetastore_get_database_pargs args; + args.name = &name; + args.write(oprot_); - oprot->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_file_metadata", bytes); - } + sentry.commit(); + return cseqid; } -void ThriftHiveMetastoreProcessor::process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = 
this->eventHandler_->getContext("ThriftHiveMetastore.put_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.put_file_metadata"); - - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.put_file_metadata"); - } - ThriftHiveMetastore_put_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); - } + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - ThriftHiveMetastore_put_file_metadata_result result; - try { - iface_->put_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.put_file_metadata"); + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ::apache::thrift::TApplicationException 
x(e.what()); - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_database_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.put_file_metadata"); - } + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - oprot->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.put_file_metadata", bytes); - } +void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +{ + int32_t seqid = send_drop_database(name, deleteData, cascade); + recv_drop_database(seqid); } -void ThriftHiveMetastoreProcessor::process_clear_file_metadata(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.clear_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.clear_file_metadata"); + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.clear_file_metadata"); - } + ThriftHiveMetastore_drop_database_pargs args; + args.name = &name; + args.deleteData = &deleteData; + args.cascade = &cascade; + args.write(oprot_); - ThriftHiveMetastore_clear_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); - } + sentry.commit(); + return cseqid; +} - ThriftHiveMetastore_clear_file_metadata_result result; - try { - iface_->clear_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.clear_file_metadata"); +void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and 
reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("drop_database") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_drop_database_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.clear_file_metadata"); - } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); - oprot->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - 
result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.clear_file_metadata", bytes); - } +void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +{ + int32_t seqid = send_get_databases(pattern); + recv_get_databases(_return, seqid); } -void ThriftHiveMetastoreProcessor::process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) { - void* ctx = NULL; - if (this->eventHandler_.get() != NULL) { - ctx = this->eventHandler_->getContext("ThriftHiveMetastore.cache_file_metadata", callContext); - } - ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.cache_file_metadata"); + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.cache_file_metadata"); - } + ThriftHiveMetastore_get_databases_pargs args; + args.pattern = &pattern; + args.write(oprot_); - ThriftHiveMetastore_cache_file_metadata_args args; - args.read(iprot); - iprot->readMessageEnd(); - uint32_t bytes = iprot->getTransport()->readEnd(); + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); 
- } + sentry.commit(); + return cseqid; +} - ThriftHiveMetastore_cache_file_metadata_result result; - try { - iface_->cache_file_metadata(result.success, args.req); - result.__isset.success = true; - } catch (const std::exception& e) { - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.cache_file_metadata"); - } +void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +{ - ::apache::thrift::TApplicationException x(e.what()); - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_EXCEPTION, seqid); - x.write(oprot); - oprot->writeMessageEnd(); - oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); - return; - } + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.cache_file_metadata"); - } + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - oprot->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_REPLY, seqid); - result.write(oprot); - oprot->writeMessageEnd(); - bytes = oprot->getTransport()->writeEnd(); - oprot->getTransport()->flush(); + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_databases") 
!= 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); - if (this->eventHandler_.get() != NULL) { - this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.cache_file_metadata", bytes); - } -} + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_databases_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); -::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveMetastoreProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { - ::apache::thrift::ReleaseHandler< ThriftHiveMetastoreIfFactory > cleanup(handlerFactory_); - ::boost::shared_ptr< ThriftHiveMetastoreIf > handler(handlerFactory_->getHandler(connInfo), cleanup); - ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveMetastoreProcessor(handler)); - return processor; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_databases failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::getMetaConf(std::string& _return, const std::string& key) +void ThriftHiveMetastoreConcurrentClient::get_all_databases(std::vector & _return) { - int32_t seqid = send_getMetaConf(key); - recv_getMetaConf(_return, seqid); + int32_t seqid = send_get_all_databases(); + recv_get_all_databases(_return, seqid); 
} -int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("getMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_getMetaConf_pargs args; - args.key = &key; + ThriftHiveMetastore_get_all_databases_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -55771,7 +57499,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_getMetaConf(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -55800,7 +57528,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("getMetaConf") != 0) { + if (fname.compare("get_all_databases") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -55809,7 +57537,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_getMetaConf_presult result; + ThriftHiveMetastore_get_all_databases_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -55825,7 +57553,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, throw result.o1; } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getMetaConf failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_databases failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -55835,21 +57563,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_getMetaConf(std::string& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::setMetaConf(const std::string& key, const std::string& value) +void ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db) { - int32_t seqid = send_setMetaConf(key, value); - recv_setMetaConf(seqid); + int32_t seqid = send_alter_database(dbname, db); + recv_alter_database(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& key, const std::string& value) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("setMetaConf", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_setMetaConf_pargs args; - args.key = &key; - args.value = &value; + ThriftHiveMetastore_alter_database_pargs args; + args.dbname = &dbname; + args.db = &db; args.write(oprot_); oprot_->writeMessageEnd(); @@ -55860,7 +57588,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_setMetaConf(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) { int32_t rseqid = 0; @@ -55889,7 +57617,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("setMetaConf") != 0) { + if (fname.compare("alter_database") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -55898,7 +57626,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_setMetaConf_presult result; + ThriftHiveMetastore_alter_database_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -55907,6 +57635,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } sentry.commit(); return; } @@ -55918,20 +57650,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_setMetaConf(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_database(const Database& database) +void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name) { - int32_t seqid = send_create_database(database); - recv_create_database(seqid); + int32_t seqid = send_get_type(name); + recv_get_type(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database& database) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_database_pargs args; - args.database = &database; + 
ThriftHiveMetastore_get_type_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -55942,7 +57674,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_database(const Database return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -55971,7 +57703,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_database") != 0) { + if (fname.compare("get_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -55980,11 +57712,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_database_presult result; + ThriftHiveMetastore_get_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -55993,12 +57731,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56008,20 +57742,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_database(const int32_t seq } // end 
while(true) } -void ThriftHiveMetastoreConcurrentClient::get_database(Database& _return, const std::string& name) +bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) { - int32_t seqid = send_get_database(name); - recv_get_database(_return, seqid); + int32_t seqid = send_create_type(type); + return recv_create_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_database_pargs args; - args.name = &name; + ThriftHiveMetastore_create_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56032,7 +57766,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_database(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) { int32_t rseqid = 0; @@ -56061,7 +57795,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_database") != 0) { + if (fname.compare("create_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56070,16 +57804,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_database_presult result; + bool _return; + 
ThriftHiveMetastore_create_type_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -56089,8 +57823,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_database failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56100,22 +57838,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_database(Database& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_database(const std::string& name, const bool deleteData, const bool cascade) +bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) { - int32_t seqid = send_drop_database(name, deleteData, cascade); - recv_drop_database(seqid); + int32_t seqid = send_drop_type(type); + return recv_drop_type(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::string& name, const bool deleteData, const bool cascade) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_database", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_database_pargs args; - args.name = 
&name; - args.deleteData = &deleteData; - args.cascade = &cascade; + ThriftHiveMetastore_drop_type_pargs args; + args.type = &type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56126,7 +57862,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_database(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) { int32_t rseqid = 0; @@ -56155,7 +57891,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_database") != 0) { + if (fname.compare("drop_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56164,11 +57900,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_database_presult result; + bool _return; + ThriftHiveMetastore_drop_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -56177,12 +57919,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56192,20 +57930,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_drop_database(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_databases(std::vector & _return, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map & _return, const std::string& name) { - int32_t seqid = send_get_databases(pattern); - recv_get_databases(_return, seqid); + int32_t seqid = send_get_type_all(name); + recv_get_type_all(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_databases_pargs args; - args.pattern = &pattern; + ThriftHiveMetastore_get_type_all_pargs args; + args.name = &name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56216,7 +57954,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_databases(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -56245,7 +57983,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_databases") != 0) { + if (fname.compare("get_type_all") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56254,7 +57992,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorreadMessageEnd(); @@ -56265,12 +58003,12 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -56280,19 +58018,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_databases(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name) { - int32_t seqid = send_get_all_databases(); - recv_get_all_databases(_return, seqid); + int32_t seqid = send_get_fields(db_name, table_name); + recv_get_fields(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_databases", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_databases_pargs args; + ThriftHiveMetastore_get_fields_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56303,7 +58043,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_databases() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -56332,7 +58072,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_databases") != 0) { + if (fname.compare("get_fields") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56341,7 +58081,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorreadMessageEnd(); @@ -56352,89 +58092,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_databases(std::vectorsync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::alter_database(const std::string& dbname, const Database& db) -{ - int32_t seqid = send_alter_database(dbname, db); - recv_alter_database(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_database(const std::string& dbname, const Database& db) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_database", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_alter_database_pargs args; - args.dbname = &dbname; - args.db = &db; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("alter_database") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_alter_database_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -56443,8 +58100,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56454,20 +58115,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_database(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_type(Type& _return, const std::string& name) +void ThriftHiveMetastoreConcurrentClient::get_fields_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_type(name); - recv_get_type(_return, seqid); + int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context); + recv_get_fields_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& 
db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_type_pargs args; - args.name = &name; + ThriftHiveMetastore_get_fields_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56478,7 +58141,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type(const std::string& na return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -56507,7 +58170,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_type") != 0) { + if (fname.compare("get_fields_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56516,7 +58179,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_type_presult result; + ThriftHiveMetastore_get_fields_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -56535,8 +58198,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int sentry.commit(); throw 
result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56546,20 +58213,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type(Type& _return, const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::create_type(const Type& type) +void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) { - int32_t seqid = send_create_type(type); - return recv_create_type(seqid); + int32_t seqid = send_get_schema(db_name, table_name); + recv_get_schema(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_type_pargs args; - args.type = &type; + ThriftHiveMetastore_get_schema_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56570,7 +58238,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_type(const Type& type) return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, 
const int32_t seqid) { int32_t rseqid = 0; @@ -56599,7 +58267,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_type") != 0) { + if (fname.compare("get_schema") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56608,16 +58276,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_create_type_presult result; + ThriftHiveMetastore_get_schema_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -56632,7 +58300,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_type failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56642,20 +58310,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_type(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_type(const std::string& type) +void ThriftHiveMetastoreConcurrentClient::get_schema_with_environment_context(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_drop_type(type); - return 
recv_drop_type(seqid); + int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context); + recv_get_schema_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& type) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_type_pargs args; - args.type = &type; + ThriftHiveMetastore_get_schema_with_environment_context_pargs args; + args.db_name = &db_name; + args.table_name = &table_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56666,7 +58336,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_type(const std::string& t return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -56695,7 +58365,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_type") != 0) { + if (fname.compare("get_schema_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56704,16 +58374,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_type_presult result; + ThriftHiveMetastore_get_schema_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -56723,8 +58393,12 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_type failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56734,20 +58408,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_type(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_type_all(std::map & _return, const std::string& name) +void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl) { - int32_t seqid = send_get_type_all(name); - recv_get_type_all(_return, seqid); + int32_t seqid = send_create_table(tbl); + recv_create_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string& name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_type_all", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, 
cseqid); - ThriftHiveMetastore_get_type_all_pargs args; - args.name = &name; + ThriftHiveMetastore_create_table_pargs args; + args.tbl = &tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56758,7 +58432,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_type_all(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) { int32_t rseqid = 0; @@ -56787,7 +58461,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_type_all") != 0) { + if (fname.compare("create_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56796,23 +58470,29 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } if (result.__isset.o2) { sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_type_all failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -56822,21 +58502,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_type_all(std::map & _return, const std::string& db_name, const std::string& table_name) +void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_fields(db_name, 
table_name); - recv_get_fields(_return, seqid); + int32_t seqid = send_create_table_with_environment_context(tbl, environment_context); + recv_create_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& db_name, const std::string& table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_fields", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_fields_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; + ThriftHiveMetastore_create_table_with_environment_context_pargs args; + args.tbl = &tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -56847,7 +58527,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -56876,7 +58556,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_fields") != 0) { + if (fname.compare("create_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56885,17 +58565,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return 
pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -56908,8 +58582,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -56919,22 +58597,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys) { - int32_t seqid = send_get_fields_with_environment_context(db_name, table_name, environment_context); - recv_get_fields_with_environment_context(_return, seqid); + int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys); + recv_create_table_with_constraints(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_fields_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_fields_with_environment_context_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - args.environment_context = &environment_context; + ThriftHiveMetastore_create_table_with_constraints_pargs args; + args.tbl = &tbl; + args.primaryKeys = &primaryKeys; + args.foreignKeys = &foreignKeys; args.write(oprot_); oprot_->writeMessageEnd(); @@ 
-56945,7 +58623,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_fields_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_context(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid) { int32_t rseqid = 0; @@ -56974,7 +58652,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_fields_with_environment_context") != 0) { + if (fname.compare("create_table_with_constraints") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -56983,17 +58661,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_fields_with_environment_context_presult result; - result.success = &_return; + ThriftHiveMetastore_create_table_with_constraints_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -57006,8 +58678,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte sentry.commit(); throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_fields_with_environment_context failed: unknown result"); + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57017,21 +58693,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_fields_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name) +void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req) { - int32_t seqid = send_get_schema(db_name, table_name); - recv_get_schema(_return, seqid); + int32_t seqid = send_drop_constraint(req); + recv_drop_constraint(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& db_name, const std::string& table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_schema", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_schema_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; + ThriftHiveMetastore_drop_constraint_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57042,7 +58717,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid) { int32_t rseqid = 0; @@ -57071,7 +58746,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_schema") != 0) { + if (fname.compare("drop_constraint") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57080,31 +58755,21 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57114,22 +58779,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema(std::vector & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req) { - int32_t seqid = send_get_schema_with_environment_context(db_name, table_name, environment_context); - recv_get_schema_with_environment_context(_return, seqid); + int32_t seqid = send_add_primary_key(req); + recv_add_primary_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_context(const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_schema_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_schema_with_environment_context_pargs args; - args.db_name = &db_name; - args.table_name = &table_name; - 
args.environment_context = &environment_context; + ThriftHiveMetastore_add_primary_key_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57140,7 +58803,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_schema_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_context(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid) { int32_t rseqid = 0; @@ -57169,7 +58832,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_schema_with_environment_context") != 0) { + if (fname.compare("add_primary_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57178,17 +58841,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_schema_with_environment_context_presult result; - result.success = &_return; + ThriftHiveMetastore_add_primary_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -57197,12 +58854,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_schema_with_environment_context failed: unknown result"); + sentry.commit(); + return; } // seqid != 
rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57212,20 +58865,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_schema_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_table(const Table& tbl) +void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req) { - int32_t seqid = send_create_table(tbl); - recv_create_table(seqid); + int32_t seqid = send_add_foreign_key(req); + recv_add_foreign_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_pargs args; - args.tbl = &tbl; + ThriftHiveMetastore_add_foreign_key_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57236,7 +58889,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table(const Table& tbl) return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid) { int32_t rseqid = 0; @@ -57265,7 +58918,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table") != 0) { + if (fname.compare("add_foreign_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57274,7 +58927,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) using 
::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_table_presult result; + ThriftHiveMetastore_add_foreign_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57287,14 +58940,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } sentry.commit(); return; } @@ -57306,21 +58951,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { - int32_t seqid = send_create_table_with_environment_context(tbl, environment_context); - recv_create_table_with_environment_context(seqid); + int32_t seqid = send_drop_table(dbname, name, deleteData); + recv_drop_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_environment_context_pargs args; - args.tbl = &tbl; - args.environment_context = &environment_context; + 
ThriftHiveMetastore_drop_table_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57331,7 +58977,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_environment_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) { int32_t rseqid = 0; @@ -57360,7 +59006,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table_with_environment_context") != 0) { + if (fname.compare("drop_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57369,7 +59015,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_table_with_environment_context_presult result; + ThriftHiveMetastore_drop_table_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57378,18 +59024,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } sentry.commit(); return; } @@ -57401,22 +59039,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_environment_con } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys) +void 
ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_create_table_with_constraints(tbl, primaryKeys, foreignKeys); - recv_create_table_with_constraints(seqid); + int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); + recv_drop_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints(const Table& tbl, const std::vector & primaryKeys, const std::vector & foreignKeys) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_table_with_constraints", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_table_with_constraints_pargs args; - args.tbl = &tbl; - args.primaryKeys = &primaryKeys; - args.foreignKeys = &foreignKeys; + ThriftHiveMetastore_drop_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.name = &name; + args.deleteData = &deleteData; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57427,7 +59066,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_table_with_constraints( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -57456,7 +59095,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_table_with_constraints") != 0) { + if (fname.compare("drop_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57465,7 +59104,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_table_with_constraints_presult result; + ThriftHiveMetastore_drop_table_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57474,18 +59113,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } if (result.__isset.o3) { sentry.commit(); throw result.o3; } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } sentry.commit(); return; } @@ -57497,20 +59128,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_table_with_constraints(con } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_constraint(const DropConstraintRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) { - int32_t seqid = send_drop_constraint(req); - recv_drop_constraint(seqid); + int32_t seqid = send_get_tables(db_name, pattern); + recv_get_tables(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropConstraintRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern) { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_constraint", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_constraint_pargs args; - args.req = &req; + ThriftHiveMetastore_get_tables_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57521,7 +59153,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_constraint(const DropCons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57550,7 +59182,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_constraint") != 0) { + if (fname.compare("get_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57559,21 +59191,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_constraint_presult result; + ThriftHiveMetastore_get_tables_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57583,20 +59217,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_constraint(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_primary_key(const AddPrimaryKeyRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_tables_by_type(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) { - int32_t seqid = send_add_primary_key(req); - recv_add_primary_key(seqid); + int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); + recv_get_tables_by_type(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrimaryKeyRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_primary_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_primary_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_tables_by_type_pargs args; + args.db_name = &db_name; + args.pattern = &pattern; + args.tableType = &tableType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57607,7 +59243,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_primary_key(const AddPrima return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57636,7 +59272,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_primary_key") != 0) { + if (fname.compare("get_tables_by_type") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57645,21 +59281,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_primary_key_presult result; + ThriftHiveMetastore_get_tables_by_type_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_tables_by_type failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57669,20 +59307,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_primary_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_foreign_key(const AddForeignKeyRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_table_meta(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { - int32_t seqid = send_add_foreign_key(req); - recv_add_foreign_key(seqid); + int32_t seqid = send_get_table_meta(db_patterns, tbl_patterns, tbl_types); + recv_get_table_meta(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForeignKeyRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_foreign_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_foreign_key_pargs args; - args.req = &req; + ThriftHiveMetastore_get_table_meta_pargs args; + args.db_patterns = &db_patterns; + args.tbl_patterns = &tbl_patterns; + args.tbl_types = &tbl_types; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57693,7 +59333,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_foreign_key(const AddForei return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57722,7 +59362,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_foreign_key") != 0) { + if (fname.compare("get_table_meta") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57731,21 +59371,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_foreign_key_presult result; + ThriftHiveMetastore_get_table_meta_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { 
+ if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57755,22 +59397,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_foreign_key(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_all_tables(std::vector & _return, const std::string& db_name) { - int32_t seqid = send_drop_table(dbname, name, deleteData); - recv_drop_table(seqid); + int32_t seqid = send_get_all_tables(db_name); + recv_get_all_tables(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& dbname, const std::string& name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_pargs args; - args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; + ThriftHiveMetastore_get_all_tables_pargs args; + args.db_name = &db_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57781,7 +59421,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table(const std::string& return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57810,7 +59450,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table") != 0) { + if (fname.compare("get_all_tables") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57819,21 +59459,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_presult result; + ThriftHiveMetastore_get_all_tables_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57843,23 +59485,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) { 
- int32_t seqid = send_drop_table_with_environment_context(dbname, name, deleteData, environment_context); - recv_drop_table_with_environment_context(seqid); + int32_t seqid = send_get_table(dbname, tbl_name); + recv_get_table(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& dbname, const std::string& tbl_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_table_with_environment_context_pargs args; + ThriftHiveMetastore_get_table_pargs args; args.dbname = &dbname; - args.name = &name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + args.tbl_name = &tbl_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57870,7 +59510,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_table_with_environment_co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57899,7 +59539,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_table_with_environment_context") != 0) { + if (fname.compare("get_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57908,21 +59548,27 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_table_with_environment_context_presult result; + ThriftHiveMetastore_get_table_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o3) { + if (result.__isset.o2) { sentry.commit(); - throw result.o3; + throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -57932,21 +59578,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_table_with_environment_conte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names) { - int32_t seqid = send_get_tables(db_name, pattern); - recv_get_tables(_return, seqid); + int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); + recv_get_table_objects_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& db_name, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_pargs args; - args.db_name = &db_name; - args.pattern = &pattern; + ThriftHiveMetastore_get_table_objects_by_name_pargs args; + args.dbname = &dbname; + args.tbl_names = &tbl_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -57957,7 +59603,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -57986,7 +59632,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables") != 0) { + if (fname.compare("get_table_objects_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -57995,7 +59641,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorreadMessageEnd(); @@ -58010,8 +59656,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -58021,22 +59675,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables(std::vector & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) +void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) { - int32_t seqid = send_get_tables_by_type(db_name, pattern, tableType); - recv_get_tables_by_type(_return, seqid); + int32_t seqid = send_get_table_names_by_filter(dbname, filter, max_tables); + recv_get_table_names_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std::string& db_name, const std::string& pattern, const std::string& tableType) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_tables_by_type", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_tables_by_type_pargs args; - args.db_name = &db_name; - args.pattern = 
&pattern; - args.tableType = &tableType; + ThriftHiveMetastore_get_table_names_by_filter_pargs args; + args.dbname = &dbname; + args.filter = &filter; + args.max_tables = &max_tables; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58047,7 +59701,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_tables_by_type(const std:: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58076,7 +59730,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_tables_by_type") != 0) { + if (fname.compare("get_table_names_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58085,7 +59739,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorreadMessageEnd(); @@ -58100,8 +59754,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -58111,22 +59773,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_tables_by_type(std::vector & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { - int32_t seqid = send_get_table_meta(db_patterns, tbl_patterns, tbl_types); - recv_get_table_meta(_return, seqid); + int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); + recv_alter_table(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::string& db_patterns, const std::string& tbl_patterns, const std::vector & tbl_types) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_meta", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_meta_pargs args; - args.db_patterns = &db_patterns; - args.tbl_patterns = &tbl_patterns; - args.tbl_types = &tbl_types; + ThriftHiveMetastore_alter_table_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58137,7 +59799,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_meta(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) { int32_t rseqid = 0; @@ -58166,7 +59828,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_meta") != 0) { + if (fname.compare("alter_table") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58175,23 +59837,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_meta failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw 
result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58201,20 +59861,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_meta(std::vector & _return, const std::string& db_name) +void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_all_tables(db_name); - recv_get_all_tables(_return, seqid); + int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); + recv_alter_table_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::string& db_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_tables", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_tables_pargs args; - args.db_name = &db_name; + ThriftHiveMetastore_alter_table_with_environment_context_pargs args; + args.dbname = &dbname; + args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58225,7 +59888,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_tables(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) 
{ int32_t rseqid = 0; @@ -58254,7 +59917,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_tables") != 0) { + if (fname.compare("alter_table_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58263,23 +59926,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_tables failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58289,21 +59950,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_tables(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_pargs args; + ThriftHiveMetastore_alter_table_with_cascade_pargs args; args.dbname = &dbname; args.tbl_name = &tbl_name; + args.new_tbl = &new_tbl; + args.cascade = &cascade; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58314,7 +59977,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table(const std::string& d return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const 
int32_t seqid) { int32_t rseqid = 0; @@ -58343,7 +60006,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table") != 0) { + if (fname.compare("alter_table_with_cascade") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58352,17 +60015,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_table_with_cascade_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -58371,8 +60028,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58382,21 +60039,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table(Table& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_objects_by_name(std::vector
& _return, const std::string& dbname, const std::vector & tbl_names) +void ThriftHiveMetastoreConcurrentClient::add_partition(Partition& _return, const Partition& new_part) { - int32_t seqid = send_get_table_objects_by_name(dbname, tbl_names); - recv_get_table_objects_by_name(_return, seqid); + int32_t seqid = send_add_partition(new_part); + recv_add_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(const std::string& dbname, const std::vector & tbl_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_objects_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_objects_by_name_pargs args; - args.dbname = &dbname; - args.tbl_names = &tbl_names; + ThriftHiveMetastore_add_partition_pargs args; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58407,7 +60063,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_objects_by_name(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::vector
& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58436,7 +60092,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_objects_by_name") != 0) { + if (fname.compare("add_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58445,7 +60101,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_objects_by_name_presult result; + ThriftHiveMetastore_add_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -58469,7 +60125,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_objects_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58479,22 +60135,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_objects_by_name(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_names_by_filter(std::vector & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) +void ThriftHiveMetastoreConcurrentClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) { - int32_t seqid = 
send_get_table_names_by_filter(dbname, filter, max_tables); - recv_get_table_names_by_filter(_return, seqid); + int32_t seqid = send_add_partition_with_environment_context(new_part, environment_context); + recv_add_partition_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(const std::string& dbname, const std::string& filter, const int16_t max_tables) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_names_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_names_by_filter_pargs args; - args.dbname = &dbname; - args.filter = &filter; - args.max_tables = &max_tables; + ThriftHiveMetastore_add_partition_with_environment_context_pargs args; + args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58505,7 +60160,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_names_by_filter(cons return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58534,7 +60189,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_names_by_filter") != 0) { + if (fname.compare("add_partition_with_environment_context") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58543,7 +60198,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_names_by_filter_presult result; + ThriftHiveMetastore_add_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -58567,7 +60222,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_names_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58577,22 +60232,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_names_by_filter(std::ve } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector & new_parts) { - int32_t seqid = send_alter_table(dbname, tbl_name, new_tbl); - recv_alter_table(seqid); + int32_t seqid = send_add_partitions(new_parts); + return recv_add_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - 
oprot_->writeMessageBegin("alter_table", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; + ThriftHiveMetastore_add_partitions_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58603,7 +60256,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -58632,7 +60285,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table") != 0) { + if (fname.compare("add_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58641,11 +60294,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -58654,8 +60313,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58665,23 +60328,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector & new_parts) { - int32_t seqid = send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context); - recv_alter_table_with_environment_context(seqid); + int32_t seqid = send_add_partitions_pspec(new_parts); + return recv_add_partitions_pspec(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_environment_context_pargs args; - args.dbname = &dbname; - args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.environment_context = &environment_context; + ThriftHiveMetastore_add_partitions_pspec_pargs args; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58692,7 +60352,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_environment_c return 
cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_context(const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) { int32_t rseqid = 0; @@ -58721,7 +60381,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_environment_context") != 0) { + if (fname.compare("add_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58730,11 +60390,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_environment_context_presult result; + int32_t _return; + ThriftHiveMetastore_add_partitions_pspec_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -58743,8 +60409,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58754,23 +60424,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_environment_cont } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_table_with_cascade(const std::string& dbname, const std::string& 
tbl_name, const Table& new_tbl, const bool cascade) +void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - int32_t seqid = send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade); - recv_alter_table_with_cascade(seqid); + int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); + recv_append_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_table_with_cascade", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_table_with_cascade_pargs args; - args.dbname = &dbname; + ThriftHiveMetastore_append_partition_pargs args; + args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_tbl = &new_tbl; - args.cascade = &cascade; + args.part_vals = &part_vals; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58781,7 +60450,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_table_with_cascade(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58810,7 +60479,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_table_with_cascade") != 0) { + if 
(fname.compare("append_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58819,11 +60488,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_table_with_cascade_presult result; + ThriftHiveMetastore_append_partition_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -58832,8 +60507,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in sentry.commit(); throw result.o2; } - sentry.commit(); - return; + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58843,20 +60522,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_table_with_cascade(const in } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition(Partition& _return, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) { - int32_t seqid = send_add_partition(new_part); - recv_add_partition(_return, seqid); + int32_t seqid = send_add_partitions_req(request); + recv_add_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const 
AddPartitionsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_pargs args; - args.new_part = &new_part; + ThriftHiveMetastore_add_partitions_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -58867,7 +60546,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition(const Partition& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58896,7 +60575,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition") != 0) { + if (fname.compare("add_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -58905,7 +60584,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_presult result; + ThriftHiveMetastore_add_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -58929,7 +60608,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -58939,20 +60618,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition(Partition& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { - int32_t seqid = send_add_partition_with_environment_context(new_part, environment_context); - recv_add_partition_with_environment_context(_return, seqid); + int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); + recv_append_partition_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment_context(const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partition_with_environment_context_pargs args; - args.new_part = &new_part; + 
ThriftHiveMetastore_append_partition_with_environment_context_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.environment_context = &environment_context; args.write(oprot_); @@ -58964,7 +60645,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partition_with_environment return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_context(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -58993,7 +60674,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partition_with_environment_context") != 0) { + if (fname.compare("append_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59002,7 +60683,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partition_with_environment_context_presult result; + ThriftHiveMetastore_append_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -59026,7 +60707,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown 
result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59036,20 +60717,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partition_with_environment_co } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions(const std::vector & new_parts) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_add_partitions(new_parts); - return recv_add_partitions(seqid); + int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); + recv_append_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_pargs args; - args.new_parts = &new_parts; + ThriftHiveMetastore_append_partition_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59060,7 +60743,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions(const std::vect return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -59089,103 +60772,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions(const int32_t s 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - int32_t _return; - ThriftHiveMetastore_add_partitions_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -int32_t ThriftHiveMetastoreConcurrentClient::add_partitions_pspec(const std::vector & new_parts) -{ - int32_t seqid = send_add_partitions_pspec(new_parts); - return recv_add_partitions_pspec(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_pspec(const std::vector & new_parts) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_add_partitions_pspec_pargs args; - args.new_parts = &new_parts; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - 
oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("add_partitions_pspec") != 0) { + if (fname.compare("append_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59194,16 +60781,16 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_partitions_pspec_presult result; + ThriftHiveMetastore_append_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -59218,7 +60805,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int throw 
result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_pspec failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59228,22 +60815,23 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_partitions_pspec(const int } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { - int32_t seqid = send_append_partition(db_name, tbl_name, part_vals); - recv_append_partition(_return, seqid); + int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); + recv_append_partition_by_name_with_environment_context(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_pargs args; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.part_name = &part_name; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59254,7 +60842,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -59283,7 +60871,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition") != 0) { + if (fname.compare("append_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59292,7 +60880,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_presult result; + ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -59316,7 +60904,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu throw result.o3; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59326,20 +60914,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition(Partition& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) +bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { - int32_t seqid = send_add_partitions_req(request); - recv_add_partitions_req(_return, seqid); + int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); + return recv_drop_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPartitionsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_partitions_req_pargs args; - args.request = &request; + ThriftHiveMetastore_drop_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59350,7 +60941,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_partitions_req(const AddPa return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsResult& _return, const 
int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -59379,7 +60970,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_partitions_req") != 0) { + if (fname.compare("drop_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59388,16 +60979,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_partitions_req_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -59407,12 +60998,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59422,22 +61009,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_partitions_req(AddPartitionsR } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_with_environment_context(Partition& _return, const std::string& db_name, const 
std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context); - recv_append_partition_with_environment_context(_return, seqid); + int32_t seqid = send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); + return recv_drop_partition_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_with_environment_context_pargs args; + ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; + args.deleteData = &deleteData; args.environment_context = &environment_context; args.write(oprot_); @@ -59449,7 +61037,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_with_environm return cseqid; 
} -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment_context(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -59478,7 +61066,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_with_environment_context") != 0) { + if (fname.compare("drop_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59487,16 +61075,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -59506,12 +61094,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, 
mtype, rseqid); @@ -59521,22 +61105,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { - int32_t seqid = send_append_partition_by_name(db_name, tbl_name, part_name); - recv_append_partition_by_name(_return, seqid); + int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); + return recv_drop_partition_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_pargs args; + ThriftHiveMetastore_drop_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_name = &part_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59547,7 +61132,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partition& _return, const int32_t seqid) +bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -59576,7 +61161,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name") != 0) { + if (fname.compare("drop_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59585,16 +61170,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -59604,12 +61189,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59619,22 +61200,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name(Partitio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::append_partition_by_name_with_environment_context(Partition& _return, 
const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { - int32_t seqid = send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context); - recv_append_partition_by_name_with_environment_context(_return, seqid); + int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); + return recv_drop_partition_by_name_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("append_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_append_partition_by_name_with_environment_context_pargs args; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_name = &part_name; + args.deleteData = &deleteData; args.environment_context = &environment_context; args.write(oprot_); @@ 
-59646,7 +61228,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_append_partition_by_name_with_ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_environment_context(Partition& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -59675,7 +61257,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("append_partition_by_name_with_environment_context") != 0) { + if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59684,16 +61266,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_append_partition_by_name_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -59703,12 +61285,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "append_partition_by_name_with_environment_context failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59718,23 +61296,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_append_partition_by_name_with_env } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) { - int32_t seqid = send_drop_partition(db_name, tbl_name, part_vals, deleteData); - return recv_drop_partition(seqid); + int32_t seqid = send_drop_partitions_req(req); + recv_drop_partitions_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.deleteData = &deleteData; + ThriftHiveMetastore_drop_partitions_req_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59745,7 +61320,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition(const std::stri return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -59774,7 +61349,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition") != 0) { + if (fname.compare("drop_partitions_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59783,16 +61358,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_presult result; + ThriftHiveMetastore_drop_partitions_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -59803,7 +61378,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59813,24 +61388,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition(const int32_t seqi } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const 
EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { - int32_t seqid = send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context); - return recv_drop_partition_with_environment_context(seqid); + int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); + recv_get_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_with_environment_context_pargs args; + ThriftHiveMetastore_get_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_vals = &part_vals; - args.deleteData = &deleteData; - args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59841,7 +61414,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_with_environmen return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -59870,7 +61443,7 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_with_environment_context") != 0) { + if (fname.compare("get_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59879,16 +61452,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -59899,7 +61472,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_with_environment_context failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -59909,23 +61482,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_with_environment_c } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& 
source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - int32_t seqid = send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData); - return recv_drop_partition_by_name(seqid); + int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partition(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; + ThriftHiveMetastore_exchange_partition_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = &dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -59936,7 +61510,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name(const s return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -59965,7 +61539,7 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name") != 0) { + if (fname.compare("exchange_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -59974,16 +61548,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_presult result; + ThriftHiveMetastore_exchange_partition_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -59993,8 +61567,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60004,24 +61586,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name(const int3 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool 
deleteData, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { - int32_t seqid = send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context); - return recv_drop_partition_by_name_with_environment_context(seqid); + int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); + recv_exchange_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partition_by_name_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.deleteData = &deleteData; - args.environment_context = &environment_context; + ThriftHiveMetastore_exchange_partitions_pargs args; + args.partitionSpecs = &partitionSpecs; + args.source_db = &source_db; + args.source_table_name = &source_table_name; + args.dest_db = &dest_db; + args.dest_table_name = 
&dest_table_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60032,7 +61614,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partition_by_name_with_en return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60061,7 +61643,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partition_by_name_with_environment_context") != 0) { + if (fname.compare("exchange_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60070,16 +61652,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_partition_by_name_with_environment_context_presult result; + ThriftHiveMetastore_exchange_partitions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -60089,8 +61671,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partition_by_name_with_environment_context failed: unknown result"); + 
throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60100,20 +61690,24 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_partition_by_name_with_envir } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_drop_partitions_req(req); - recv_drop_partitions_req(_return, seqid); + int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); + recv_get_partition_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const DropPartitionsRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_partitions_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_partitions_req_pargs args; - args.req = &req; + ThriftHiveMetastore_get_partition_with_auth_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60124,7 +61718,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_drop_partitions_req(const Drop return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartitionsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60153,7 +61747,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_partitions_req") != 0) { + if (fname.compare("get_partition_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60162,7 +61756,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_partitions_req_presult result; + ThriftHiveMetastore_get_partition_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60182,7 +61776,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_partitions_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60192,22 +61786,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_partitions_req(DropPartition } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +void 
ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { - int32_t seqid = send_get_partition(db_name, tbl_name, part_vals); - recv_get_partition(_return, seqid); + int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name); + recv_get_partition_by_name(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_pargs args; + ThriftHiveMetastore_get_partition_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60218,7 +61812,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60247,7 +61841,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition") != 0) { + if (fname.compare("get_partition_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); @@ -60256,7 +61850,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_presult result; + ThriftHiveMetastore_get_partition_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60276,7 +61870,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60286,24 +61880,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition(Partition& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::exchange_partition(Partition& _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partition(_return, seqid); + int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts); + recv_get_partitions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& 
dest_table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("exchange_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partition_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_get_partitions_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60314,7 +61906,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partition(const std:: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60343,7 +61935,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partition") != 0) { + if (fname.compare("get_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60352,7 +61944,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_exchange_partition_presult result; + ThriftHiveMetastore_get_partitions_presult result; result.success = &_return; 
result.read(iprot_); iprot_->readMessageEnd(); @@ -60371,16 +61963,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "exchange_partition failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60390,24 +61974,24 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partition(Partition& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::exchange_partitions(std::vector & _return, const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +void ThriftHiveMetastoreConcurrentClient::get_partitions_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name); - recv_exchange_partitions(_return, seqid); + int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names); + recv_get_partitions_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std::map & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, 
const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("exchange_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_exchange_partitions_pargs args; - args.partitionSpecs = &partitionSpecs; - args.source_db = &source_db; - args.source_table_name = &source_table_name; - args.dest_db = &dest_db; - args.dest_table_name = &dest_table_name; + ThriftHiveMetastore_get_partitions_with_auth_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_parts = &max_parts; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60418,7 +62002,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_exchange_partitions(const std: return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60447,7 +62031,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector

readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("exchange_partitions") != 0) { + if (fname.compare("get_partitions_with_auth") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60456,7 +62040,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector

readMessageEnd(); @@ -60475,16 +62059,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector

sync_.updatePending(fname, mtype, rseqid); @@ -60494,24 +62070,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_exchange_partitions(std::vector

& part_vals, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { - int32_t seqid = send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names); - recv_get_partition_with_auth(_return, seqid); + int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts); + recv_get_partitions_pspec(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_with_auth_pargs args; + ThriftHiveMetastore_get_partitions_pspec_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.user_name = &user_name; - args.group_names = &group_names; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60522,7 +62096,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_with_auth(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60551,7 +62125,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_with_auth") != 0) { + if (fname.compare("get_partitions_pspec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60560,7 +62134,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_with_auth_presult result; + ThriftHiveMetastore_get_partitions_pspec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60580,7 +62154,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_with_auth failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60590,22 +62164,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_with_auth(Partition } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { - int32_t seqid = send_get_partition_by_name(db_name, tbl_name, part_name); - recv_get_partition_by_name(_return, seqid); + int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts); + 
recv_get_partition_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_by_name_pargs args; + ThriftHiveMetastore_get_partition_names_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_name = &part_name; + args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -60616,7 +62190,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_by_name(const st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60645,7 +62219,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_by_name") != 0) { + if (fname.compare("get_partition_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60654,7 +62228,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_by_name_presult result; + 
ThriftHiveMetastore_get_partition_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60665,16 +62239,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } if (result.__isset.o2) { sentry.commit(); throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60684,21 +62254,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_by_name(Partition& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - int32_t seqid = send_get_partitions(db_name, tbl_name, max_parts); - recv_get_partitions(_return, seqid); + int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partitions_ps(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - 
oprot_->writeMessageBegin("get_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pargs args; + ThriftHiveMetastore_get_partitions_ps_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.max_parts = &max_parts; args.write(oprot_); @@ -60710,7 +62281,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60739,7 +62310,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions") != 0) { + if (fname.compare("get_partitions_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60748,7 +62319,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorreadMessageEnd(); @@ -60768,7 +62339,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -60778,21 +62349,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_partitions_ps_with_auth(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_get_partitions_with_auth(db_name, tbl_name, max_parts, 
user_name, group_names); - recv_get_partitions_with_auth(_return, seqid); + int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); + recv_get_partitions_ps_with_auth(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_with_auth_pargs args; + ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.max_parts = &max_parts; args.user_name = &user_name; args.group_names = &group_names; @@ -60806,7 +62378,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_with_auth(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60835,7 +62407,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_with_auth") != 0) { + if (fname.compare("get_partitions_ps_with_auth") != 0) 
{ iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60844,7 +62416,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_with_auth_presult result; + ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60864,7 +62436,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_with_auth failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -60874,21 +62446,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_with_auth(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_pspec(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { - int32_t seqid = send_get_partitions_pspec(db_name, tbl_name, max_parts); - recv_get_partitions_pspec(_return, seqid); + int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); + recv_get_partition_names_ps(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) 
+int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_pspec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_pspec_pargs args; + ThriftHiveMetastore_get_partition_names_ps_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; + args.part_vals = &part_vals; args.max_parts = &max_parts; args.write(oprot_); @@ -60900,7 +62473,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_pspec(const std return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -60929,7 +62502,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_pspec") != 0) { + if (fname.compare("get_partition_names_ps") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -60938,7 +62511,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_pspec_presult result; + ThriftHiveMetastore_get_partition_names_ps_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -60958,97 +62531,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_partitions_pspec(std::vector< throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_pspec failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) -{ - int32_t seqid = send_get_partition_names(db_name, tbl_name, max_parts); - recv_get_partition_names(_return, seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_names", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_get_partition_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_parts = &max_parts; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector & _return, const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - 
iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("get_partition_names") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_get_partition_names_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61058,22 +62541,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { - 
int32_t seqid = send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partitions_ps(_return, seqid); + int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_partitions_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_pargs args; + ThriftHiveMetastore_get_partitions_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + args.filter = &filter; args.max_parts = &max_parts; args.write(oprot_); @@ -61085,7 +62568,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps(const std::s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61114,7 +62597,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps") != 0) { + if (fname.compare("get_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61123,7 +62606,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorreadMessageEnd(); @@ -61143,7 +62626,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -61153,25 +62636,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { - int32_t seqid = send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names); - recv_get_partitions_ps_with_auth(_return, seqid); + int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); + recv_get_part_specs_by_filter(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_ps_with_auth", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_ps_with_auth_pargs args; + ThriftHiveMetastore_get_part_specs_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; + 
args.filter = &filter; args.max_parts = &max_parts; - args.user_name = &user_name; - args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61182,7 +62663,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_ps_with_auth(co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61211,7 +62692,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_ps_with_auth") != 0) { + if (fname.compare("get_part_specs_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61220,7 +62701,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_ps_with_auth_presult result; + ThriftHiveMetastore_get_part_specs_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -61240,7 +62721,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_ps_with_auth failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61250,23 +62731,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_partitions_ps_with_auth(std:: } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_names_ps(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) { - int32_t seqid = send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts); - recv_get_partition_names_ps(_return, seqid); + int32_t seqid = send_get_partitions_by_expr(req); + recv_get_partitions_by_expr(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_names_ps", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_names_ps_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.max_parts = &max_parts; + ThriftHiveMetastore_get_partitions_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61277,7 +62755,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_names_ps(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61306,7 +62784,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_names_ps") != 0) { + if (fname.compare("get_partitions_by_expr") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61315,7 +62793,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_names_ps_presult result; + ThriftHiveMetastore_get_partitions_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -61335,7 +62813,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_names_ps failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61345,23 +62823,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_names_ps(std::vecto } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) { - int32_t seqid = send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_partitions_by_filter(_return, seqid); + int32_t seqid = 
send_get_num_partitions_by_filter(db_name, tbl_name, filter); + return recv_get_num_partitions_by_filter(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_filter_pargs args; + ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.filter = &filter; - args.max_parts = &max_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61372,7 +62849,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_filter(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vector & _return, const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid) { int32_t rseqid = 0; @@ -61401,7 +62878,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_filter") != 0) { + if (fname.compare("get_num_partitions_by_filter") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61410,16 +62887,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_filter_presult result; + int32_t _return; + ThriftHiveMetastore_get_num_partitions_by_filter_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -61430,7 +62907,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61440,23 +62917,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_filter(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_part_specs_by_filter(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) { - int32_t seqid = send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts); - recv_get_part_specs_by_filter(_return, seqid); + int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names); + recv_get_partitions_by_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_part_specs_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_part_specs_by_filter_pargs args; + ThriftHiveMetastore_get_partitions_by_names_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; - args.max_parts = &max_parts; + args.names = &names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61467,7 +62943,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_part_specs_by_filter(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -61496,7 +62972,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_part_specs_by_filter") != 0) { + if (fname.compare("get_partitions_by_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61505,7 +62981,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_part_specs_by_filter_presult result; + ThriftHiveMetastore_get_partitions_by_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -61525,7 +63001,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_part_specs_by_filter failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61535,20 +63011,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_part_specs_by_filter(std::vec } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { - int32_t seqid = send_get_partitions_by_expr(req); - recv_get_partitions_by_expr(_return, seqid); + int32_t seqid = send_alter_partition(db_name, tbl_name, new_part); + recv_alter_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const PartitionsByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_expr_pargs args; - args.req = &req; + ThriftHiveMetastore_alter_partition_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61559,7 +63037,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_expr(const P return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(PartitionsByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -61588,7 +63066,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_expr") != 0) { + if (fname.compare("alter_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61597,17 +63075,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_expr_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -61616,8 +63088,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_expr failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61627,22 +63099,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_expr(Partitions } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::get_num_partitions_by_filter(const std::string& db_name, const std::string& 
tbl_name, const std::string& filter) +void ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { - int32_t seqid = send_get_num_partitions_by_filter(db_name, tbl_name, filter); - return recv_get_num_partitions_by_filter(seqid); + int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts); + recv_alter_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_num_partitions_by_filter", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_num_partitions_by_filter_pargs args; + ThriftHiveMetastore_alter_partitions_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.filter = &filter; + args.new_parts = &new_parts; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61653,7 +63125,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_num_partitions_by_filter(c return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -61682,7 +63154,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_num_partitions_by_filter") != 0) { + if (fname.compare("alter_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61691,17 +63163,11 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_get_num_partitions_by_filter_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -61710,8 +63176,8 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_num_partitions_by_filter failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61721,22 +63187,23 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_get_num_partitions_by_filter(c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_by_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const std::vector & names) +void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { - int32_t seqid = send_get_partitions_by_names(db_name, tbl_name, names); - recv_get_partitions_by_names(_return, seqid); + int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); + recv_alter_partitions_with_environment_context(seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const std::string& db_name, const std::string& tbl_name, const std::vector & names) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_by_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_by_names_pargs args; + ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.names = &names; + args.new_parts = &new_parts; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61747,7 +63214,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_by_names(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -61776,7 +63243,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_by_names") != 0) { + if (fname.compare("alter_partitions_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61785,17 +63252,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_by_names_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -61804,8 +63265,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_by_names failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -61815,22 +63276,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_by_names(std::vect } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) { - int32_t seqid = send_alter_partition(db_name, tbl_name, new_part); - recv_alter_partition(seqid); + int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); + recv_alter_partition_with_environment_context(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const 
EnvironmentContext& environment_context) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_pargs args; + ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.new_part = &new_part; + args.environment_context = &environment_context; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61841,7 +63303,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition(const std::str return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid) { int32_t rseqid = 0; @@ -61870,7 +63332,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition") != 0) { + if (fname.compare("alter_partition_with_environment_context") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61879,7 +63341,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partition_presult result; + ThriftHiveMetastore_alter_partition_with_environment_context_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61903,22 +63365,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition(const int32_t seq } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +void ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { - int32_t seqid = send_alter_partitions(db_name, tbl_name, new_parts); - recv_alter_partitions(seqid); + int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part); + recv_rename_partition(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts) +int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_pargs args; + ThriftHiveMetastore_rename_partition_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.new_parts = &new_parts; + args.part_vals = &part_vals; + args.new_part = &new_part; args.write(oprot_); oprot_->writeMessageEnd(); @@ -61929,7 +63392,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid) { int32_t rseqid = 0; @@ -61958,7 +63421,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions") != 
0) { + if (fname.compare("rename_partition") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61967,7 +63430,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_presult result; + ThriftHiveMetastore_rename_partition_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -61991,23 +63454,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions(const int32_t se } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { - int32_t seqid = send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context); - recv_alter_partitions_with_environment_context(seqid); + int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception); + return recv_partition_name_has_valid_characters(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector & new_parts, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partitions_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); 
+ oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partitions_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_parts = &new_parts; - args.environment_context = &environment_context; + ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; + args.part_vals = &part_vals; + args.throw_exception = &throw_exception; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62018,7 +63479,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partitions_with_environm return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment_context(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid) { int32_t rseqid = 0; @@ -62047,7 +63508,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partitions_with_environment_context") != 0) { + if (fname.compare("partition_name_has_valid_characters") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62056,21 +63517,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partitions_with_environment_context_presult result; + bool _return; + ThriftHiveMetastore_partition_name_has_valid_characters_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw 
result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62080,23 +63543,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partitions_with_environment } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) { - int32_t seqid = send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context); - recv_alter_partition_with_environment_context(seqid); + int32_t seqid = send_get_config_value(name, defaultValue); + recv_get_config_value(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_partition_with_environment_context", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_partition_with_environment_context_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.new_part = &new_part; - args.environment_context = &environment_context; + 
ThriftHiveMetastore_get_config_value_pargs args; + args.name = &name; + args.defaultValue = &defaultValue; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62107,7 +63568,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_partition_with_environme return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_context(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62136,7 +63597,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_partition_with_environment_context") != 0) { + if (fname.compare("get_config_value") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62145,21 +63606,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_partition_with_environment_context_presult result; + ThriftHiveMetastore_get_config_value_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62169,23 +63632,20 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_alter_partition_with_environment_ } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) { - int32_t seqid = send_rename_partition(db_name, tbl_name, part_vals, new_part); - recv_rename_partition(seqid); + int32_t seqid = send_partition_name_to_vals(part_name); + recv_partition_name_to_vals(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector & part_vals, const Partition& new_part) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("rename_partition", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_rename_partition_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.new_part = &new_part; + ThriftHiveMetastore_partition_name_to_vals_pargs args; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62196,7 +63656,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_rename_partition(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62225,7 +63685,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); } - if (fname.compare("rename_partition") != 0) { + if (fname.compare("partition_name_to_vals") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62234,21 +63694,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_rename_partition_presult result; + ThriftHiveMetastore_partition_name_to_vals_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o1; + return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62258,21 +63720,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_rename_partition(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::partition_name_has_valid_characters(const std::vector & part_vals, const bool throw_exception) +void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map & _return, const std::string& part_name) { - int32_t seqid = send_partition_name_has_valid_characters(part_vals, throw_exception); - return recv_partition_name_has_valid_characters(seqid); + int32_t seqid = send_partition_name_to_spec(part_name); + recv_partition_name_to_spec(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_characters(const std::vector & 
part_vals, const bool throw_exception) +int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_has_valid_characters", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_has_valid_characters_pargs args; - args.part_vals = &part_vals; - args.throw_exception = &throw_exception; + ThriftHiveMetastore_partition_name_to_spec_pargs args; + args.part_name = &part_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62283,7 +63744,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_has_valid_chara return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characters(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62312,7 +63773,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_has_valid_characters") != 0) { + if (fname.compare("partition_name_to_spec") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62321,23 +63782,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_partition_name_has_valid_characters_presult result; + ThriftHiveMetastore_partition_name_to_spec_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if 
(result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_has_valid_characters failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_spec failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62347,21 +63808,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_partition_name_has_valid_characte } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) +void ThriftHiveMetastoreConcurrentClient::markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_get_config_value(name, defaultValue); - recv_get_config_value(_return, seqid); + int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); + recv_markPartitionForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::string& name, const std::string& defaultValue) +int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_config_value", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_config_value_pargs args; - args.name = &name; 
- args.defaultValue = &defaultValue; + ThriftHiveMetastore_markPartitionForEvent_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62372,7 +63835,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_config_value(const std::st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -62401,7 +63864,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_config_value") != 0) { + if (fname.compare("markPartitionForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62410,23 +63873,37 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_config_value_presult result; - result.success = &_return; + ThriftHiveMetastore_markPartitionForEvent_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_config_value failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if 
(result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62436,20 +63913,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_config_value(std::string& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_vals(std::vector & _return, const std::string& part_name) +bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { - int32_t seqid = send_partition_name_to_vals(part_name); - recv_partition_name_to_vals(_return, seqid); + int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); + return recv_isPartitionMarkedForEvent(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_vals", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_vals_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_vals = &part_vals; + args.eventType = &eventType; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62460,7 +63940,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_vals(const s return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) { int32_t rseqid = 0; @@ -62489,7 +63969,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_vals") != 0) { + if (fname.compare("isPartitionMarkedForEvent") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62498,23 +63978,43 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_partition_name_to_vals_presult result; + bool _return; + ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + if (result.__isset.o5) { + sentry.commit(); + throw result.o5; + } + if (result.__isset.o6) { + sentry.commit(); + throw result.o6; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "partition_name_to_vals failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); } // seqid != rseqid 
this->sync_.updatePending(fname, mtype, rseqid); @@ -62524,20 +64024,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_vals(std::vecto } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::partition_name_to_spec(std::map & _return, const std::string& part_name) +void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) { - int32_t seqid = send_partition_name_to_spec(part_name); - recv_partition_name_to_spec(_return, seqid); + int32_t seqid = send_add_index(new_index, index_table); + recv_add_index(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const std::string& part_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("partition_name_to_spec", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_partition_name_to_spec_pargs args; - args.part_name = &part_name; + ThriftHiveMetastore_add_index_pargs args; + args.new_index = &new_index; + args.index_table = &index_table; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62548,7 +64049,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_partition_name_to_spec(const s return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62577,7 +64078,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("partition_name_to_spec") != 0) { + if (fname.compare("add_index") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62586,7 +64087,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapreadMessageEnd(); @@ -62601,8 +64102,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::mapsync_.updatePending(fname, mtype, rseqid); @@ -62612,23 +64121,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_partition_name_to_spec(std::map & part_vals, const PartitionEventType::type eventType) +void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { - int32_t seqid = send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType); - recv_markPartitionForEvent(seqid); + int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); + recv_alter_index(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("markPartitionForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_markPartitionForEvent_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; + ThriftHiveMetastore_alter_index_pargs args; + args.dbname = &dbname; + args.base_tbl_name = &base_tbl_name; + args.idx_name = &idx_name; + args.new_idx = &new_idx; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -62639,7 +64148,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_markPartitionForEvent(const st return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) { int32_t rseqid = 0; @@ -62668,7 +64177,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("markPartitionForEvent") != 0) { + if (fname.compare("alter_index") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62677,7 +64186,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_markPartitionForEvent_presult result; + ThriftHiveMetastore_alter_index_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62690,22 +64199,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } sentry.commit(); return; } @@ -62717,23 +64210,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_markPartitionForEvent(const int32 } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const 
std::string& tbl_name, const std::string& index_name, const bool deleteData) { - int32_t seqid = send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType); - return recv_isPartitionMarkedForEvent(seqid); + int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); + return recv_drop_index_by_name(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map & part_vals, const PartitionEventType::type eventType) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("isPartitionMarkedForEvent", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_isPartitionMarkedForEvent_pargs args; + ThriftHiveMetastore_drop_index_by_name_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.part_vals = &part_vals; - args.eventType = &eventType; + args.index_name = &index_name; + args.deleteData = &deleteData; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62744,7 +64237,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_isPartitionMarkedForEvent(cons return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) { int32_t rseqid = 0; @@ -62773,7 +64266,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("isPartitionMarkedForEvent") != 0) { + if (fname.compare("drop_index_by_name") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62783,7 +64276,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_isPartitionMarkedForEvent_presult result; + ThriftHiveMetastore_drop_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -62801,24 +64294,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - if (result.__isset.o5) { - sentry.commit(); - throw result.o5; - } - if (result.__isset.o6) { - sentry.commit(); - throw result.o6; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "isPartitionMarkedForEvent failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62828,21 +64305,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_isPartitionMarkedForEvent(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_index(Index& _return, const Index& new_index, const Table& index_table) +void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { - int32_t seqid = send_add_index(new_index, index_table); - recv_add_index(_return, seqid); + int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); + recv_get_index_by_name(_return, seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_index, const Table& index_table) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_index_pargs args; - args.new_index = &new_index; - args.index_table = &index_table; + ThriftHiveMetastore_get_index_by_name_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.index_name = &index_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62853,7 +64331,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_index(const Index& new_ind return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62882,7 +64360,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_index") != 0) { + if (fname.compare("get_index_by_name") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62891,7 +64369,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_index_presult result; + ThriftHiveMetastore_get_index_by_name_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -62910,12 +64388,8 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_index failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -62925,23 +64399,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_index(Index& _return, const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_alter_index(dbname, base_tbl_name, idx_name, new_idx); - recv_alter_index(seqid); + int32_t seqid = send_get_indexes(db_name, tbl_name, max_indexes); + recv_get_indexes(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& dbname, const std::string& base_tbl_name, const std::string& idx_name, const Index& new_idx) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_index", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_index_pargs args; - args.dbname = &dbname; - args.base_tbl_name = &base_tbl_name; - args.idx_name = 
&idx_name; - args.new_idx = &new_idx; + ThriftHiveMetastore_get_indexes_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -62952,7 +64425,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_index(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -62981,7 +64454,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("alter_index") != 0) { + if (fname.compare("get_indexes") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -62990,11 +64463,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_index_presult result; + ThriftHiveMetastore_get_indexes_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -63003,8 +64482,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) sentry.commit(); throw result.o2; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63014,23 +64493,22 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_alter_index(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { - int32_t seqid = send_drop_index_by_name(db_name, tbl_name, index_name, deleteData); - return recv_drop_index_by_name(seqid); + int32_t seqid = send_get_index_names(db_name, tbl_name, max_indexes); + recv_get_index_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name, const bool deleteData) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_index_by_name_pargs args; + ThriftHiveMetastore_get_index_names_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; - args.index_name = &index_name; - args.deleteData = &deleteData; + args.max_indexes = &max_indexes; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63041,7 +64519,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_index_by_name(const std:: return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63070,7 +64548,7 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_index_by_name") != 0) { + if (fname.compare("get_index_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63079,16 +64557,104 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_index_by_name_presult result; + ThriftHiveMetastore_get_index_names_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) +{ + int32_t seqid = send_get_primary_keys(request); + recv_get_primary_keys(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const PrimaryKeysRequest& request) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_primary_keys_pargs args; + 
args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_primary_keys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_primary_keys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; } if (result.__isset.o1) { sentry.commit(); @@ -63099,7 +64665,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t throw result.o2; } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63109,22 +64675,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_index_by_name(const int32_t } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_index_by_name(Index& _return, const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) { - int32_t seqid = send_get_index_by_name(db_name, tbl_name, index_name); - recv_get_index_by_name(_return, seqid); + int32_t seqid = send_get_foreign_keys(request); + recv_get_foreign_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& index_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_by_name", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_by_name_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.index_name = &index_name; + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63135,7 +64699,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_by_name(const std::s return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63164,7 +64728,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_by_name") != 0) { + if (fname.compare("get_foreign_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63173,7 +64737,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_index_by_name_presult result; + ThriftHiveMetastore_get_foreign_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63193,7 +64757,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_by_name failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63203,22 +64767,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_by_name(Index& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_get_indexes(db_name, 
tbl_name, max_indexes); - recv_get_indexes(_return, seqid); + int32_t seqid = send_update_table_column_statistics(stats_obj); + return recv_update_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_indexes", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_indexes_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + ThriftHiveMetastore_update_table_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63229,7 +64791,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_indexes(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -63258,7 +64820,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_indexes") != 0) { + if (fname.compare("update_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63267,16 +64829,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
ThriftHiveMetastore_get_indexes_presult result; + bool _return; + ThriftHiveMetastore_update_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -63286,8 +64848,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_indexes failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63297,22 +64867,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_indexes(std::vector & } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) { - int32_t seqid = send_get_index_names(db_name, tbl_name, max_indexes); - recv_get_index_names(_return, seqid); + int32_t seqid = send_update_partition_column_statistics(stats_obj); + return recv_update_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) 
{ int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_index_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_index_names_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.max_indexes = &max_indexes; + ThriftHiveMetastore_update_partition_column_statistics_pargs args; + args.stats_obj = &stats_obj; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63323,7 +64891,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_index_names(const std::str return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -63352,7 +64920,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_index_names") != 0) { + if (fname.compare("update_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63361,23 +64929,35 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; } if (result.__isset.o2) { sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63387,20 +64967,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_primary_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_get_table_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63411,7 +64993,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const Primary return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63440,7 +65022,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_primary_keys") != 0) { + if (fname.compare("get_table_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63449,7 +65031,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_primary_keys_presult result; + ThriftHiveMetastore_get_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63468,8 +65050,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63479,20 +65069,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(PrimaryKeysRespo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) +void ThriftHiveMetastoreConcurrentClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - int32_t seqid = send_get_foreign_keys(request); - recv_get_foreign_keys(_return, seqid); + int32_t seqid = send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); + recv_get_partition_column_statistics(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const ForeignKeysRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { int32_t cseqid = 
this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_foreign_keys_pargs args; - args.request = &request; + ThriftHiveMetastore_get_partition_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.part_name = &part_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63503,7 +65096,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const Foreign return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63532,7 +65125,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_foreign_keys") != 0) { + if (fname.compare("get_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63541,7 +65134,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_foreign_keys_presult result; + ThriftHiveMetastore_get_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -63560,8 +65153,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + 
throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63571,20 +65172,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(ForeignKeysRespo } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) { - int32_t seqid = send_update_table_column_statistics(stats_obj); - return recv_update_table_column_statistics(seqid); + int32_t seqid = send_get_table_statistics_req(request); + recv_get_table_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_table_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_get_table_statistics_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63595,7 +65196,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_table_column_statistics return 
cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63624,7 +65225,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("update_table_column_statistics") != 0) { + if (fname.compare("get_table_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63633,16 +65234,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_update_table_column_statistics_presult result; + ThriftHiveMetastore_get_table_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -63652,16 +65253,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ 
-63671,20 +65264,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_table_column_statistics(co } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::update_partition_column_statistics(const ColumnStatistics& stats_obj) +void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) { - int32_t seqid = send_update_partition_column_statistics(stats_obj); - return recv_update_partition_column_statistics(seqid); + int32_t seqid = send_get_partitions_statistics_req(request); + recv_get_partitions_statistics_req(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statistics(const ColumnStatistics& stats_obj) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_update_partition_column_statistics_pargs args; - args.stats_obj = &stats_obj; + ThriftHiveMetastore_get_partitions_statistics_req_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -63695,7 +65288,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_update_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -63724,7 +65317,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("update_partition_column_statistics") != 0) { + if (fname.compare("get_partitions_statistics_req") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63733,16 +65326,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_update_partition_column_statistics_presult result; + ThriftHiveMetastore_get_partitions_statistics_req_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -63752,16 +65345,100 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic sentry.commit(); throw result.o2; } - if (result.__isset.o3) { + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) +{ + int32_t seqid = send_get_aggr_stats_for(request); + recv_get_aggr_stats_for(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_aggr_stats_for", 
::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_aggr_stats_for_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); sentry.commit(); - throw result.o3; + throw x; } - if (result.__isset.o4) { + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_aggr_stats_for") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_aggr_stats_for_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o4; + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + 
if (result.__isset.o2) { + sentry.commit(); + throw result.o2; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "update_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63771,22 +65448,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_update_partition_column_statistic } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) { - int32_t seqid = send_get_table_column_statistics(db_name, tbl_name, col_name); - recv_get_table_column_statistics(_return, seqid); + int32_t seqid = send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.col_name = &col_name; + ThriftHiveMetastore_set_aggr_stats_for_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); 
@@ -63797,7 +65472,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_column_statistics(co return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(ColumnStatistics& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) { int32_t rseqid = 0; @@ -63826,7 +65501,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_column_statistics") != 0) { + if (fname.compare("set_aggr_stats_for") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63835,16 +65510,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_column_statistics_presult result; + bool _return; + ThriftHiveMetastore_set_aggr_stats_for_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -63863,7 +65538,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63873,19 +65548,19 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_table_column_statistics(Colum } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { - int32_t seqid = send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name); - recv_get_partition_column_statistics(_return, seqid); + int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); + return recv_delete_partition_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partition_column_statistics_pargs args; + ThriftHiveMetastore_delete_partition_column_statistics_pargs args; args.db_name = &db_name; args.tbl_name = &tbl_name; args.part_name = &part_name; @@ -63900,7 +65575,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partition_column_statistic return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(ColumnStatistics& 
_return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -63929,7 +65604,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partition_column_statistics") != 0) { + if (fname.compare("delete_partition_column_statistics") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -63938,16 +65613,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partition_column_statistics_presult result; + bool _return; + ThriftHiveMetastore_delete_partition_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -63966,7 +65641,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C throw result.o4; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -63976,20 +65651,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partition_column_statistics(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_table_statistics_req(TableStatsResult& _return, 
const TableStatsRequest& request) +bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { - int32_t seqid = send_get_table_statistics_req(request); - recv_get_table_statistics_req(_return, seqid); + int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); + return recv_delete_table_column_statistics(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const TableStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_table_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_table_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_delete_table_column_statistics_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.col_name = &col_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64000,7 +65677,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_table_statistics_req(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableStatsResult& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) { int32_t rseqid = 0; @@ -64029,7 +65706,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_table_statistics_req") != 0) { + if (fname.compare("delete_table_column_statistics") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64038,16 +65715,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_table_statistics_req_presult result; + bool _return; + ThriftHiveMetastore_delete_table_column_statistics_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); @@ -64057,8 +65734,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta sentry.commit(); throw result.o2; } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_table_statistics_req failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64068,20 +65753,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_table_statistics_req(TableSta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) { - int32_t seqid = send_get_partitions_statistics_req(request); - recv_get_partitions_statistics_req(_return, seqid); + int32_t seqid = send_create_function(func); + 
recv_create_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req(const PartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_partitions_statistics_req", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_partitions_statistics_req_pargs args; - args.request = &request; + ThriftHiveMetastore_create_function_pargs args; + args.func = &func; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64092,7 +65777,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_partitions_statistics_req( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(PartitionsStatsResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) { int32_t rseqid = 0; @@ -64121,7 +65806,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_partitions_statistics_req") != 0) { + if (fname.compare("create_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64130,17 +65815,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_partitions_statistics_req_presult result; - result.success = &_return; + ThriftHiveMetastore_create_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) 
{ - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64149,8 +65828,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_partitions_statistics_req failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + if (result.__isset.o4) { + sentry.commit(); + throw result.o4; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64160,20 +65847,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_partitions_statistics_req(Par } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) +void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_get_aggr_stats_for(request); - recv_get_aggr_stats_for(_return, seqid); + int32_t seqid = send_drop_function(dbName, funcName); + recv_drop_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const PartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_drop_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); 
oprot_->writeMessageEnd(); @@ -64184,7 +65872,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_aggr_stats_for(const Parti return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) { int32_t rseqid = 0; @@ -64213,7 +65901,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_aggr_stats_for") != 0) { + if (fname.compare("drop_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64222,27 +65910,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_aggr_stats_for_presult result; - result.success = &_return; + ThriftHiveMetastore_drop_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { + if (result.__isset.o3) { sentry.commit(); - throw result.o2; + throw result.o3; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64252,20 +65934,22 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_aggr_stats_for(AggrStats& _re } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) +void 
ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { - int32_t seqid = send_set_aggr_stats_for(request); - return recv_set_aggr_stats_for(seqid); + int32_t seqid = send_alter_function(dbName, funcName, newFunc); + recv_alter_function(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_aggr_stats_for_pargs args; - args.request = &request; + ThriftHiveMetastore_alter_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; + args.newFunc = &newFunc; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64276,7 +65960,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_aggr_stats_for(const SetPa return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid) { int32_t rseqid = 0; @@ -64305,7 +65989,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_aggr_stats_for") != 0) { + if (fname.compare("alter_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64314,17 +65998,11 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t using ::apache::thrift::protocol::TProtocolException; 
throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_set_aggr_stats_for_presult result; - result.success = &_return; + ThriftHiveMetastore_alter_function_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -64333,16 +66011,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64352,23 +66022,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_set_aggr_stats_for(const int32_t } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) { - int32_t seqid = send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); - return recv_delete_partition_column_statistics(seqid); + int32_t seqid = send_get_functions(dbName, pattern); + recv_get_functions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, 
const std::string& pattern) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_partition_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_delete_partition_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.part_name = &part_name; - args.col_name = &col_name; + ThriftHiveMetastore_get_functions_pargs args; + args.dbName = &dbName; + args.pattern = &pattern; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64379,7 +66047,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_partition_column_statis return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64408,7 +66076,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_partition_column_statistics") != 0) { + if (fname.compare("get_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64417,35 +66085,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_delete_partition_column_statistics_presult result; + ThriftHiveMetastore_get_functions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return 
_return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_partition_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64455,22 +66111,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_partition_column_statistic } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +void ThriftHiveMetastoreConcurrentClient::get_function(Function& _return, const std::string& dbName, const std::string& funcName) { - int32_t seqid = send_delete_table_column_statistics(db_name, tbl_name, col_name); - return recv_delete_table_column_statistics(seqid); + int32_t seqid = send_get_function(dbName, funcName); + recv_get_function(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string& dbName, const std::string& funcName) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("delete_table_column_statistics", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); - 
ThriftHiveMetastore_delete_table_column_statistics_pargs args; - args.db_name = &db_name; - args.tbl_name = &tbl_name; - args.col_name = &col_name; + ThriftHiveMetastore_get_function_pargs args; + args.dbName = &dbName; + args.funcName = &funcName; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64481,7 +66136,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_delete_table_column_statistics return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64510,7 +66165,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("delete_table_column_statistics") != 0) { + if (fname.compare("get_function") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64519,16 +66174,16 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_delete_table_column_statistics_presult result; + ThriftHiveMetastore_get_function_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); @@ -64538,16 +66193,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } - if (result.__isset.o4) { - sentry.commit(); - throw result.o4; - } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "delete_table_column_statistics failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64557,20 +66204,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_delete_table_column_statistics(co } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::create_function(const Function& func) +void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) { - int32_t seqid = send_create_function(func); - recv_create_function(seqid); + int32_t seqid = send_get_all_functions(); + recv_get_all_functions(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function& func) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_function_pargs args; - args.func = &func; + ThriftHiveMetastore_get_all_functions_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64581,7 +66227,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_function(const Function return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64610,7 +66256,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("create_function") != 0) { + if (fname.compare("get_all_functions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64619,29 +66265,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_create_function_presult result; + ThriftHiveMetastore_get_all_functions_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o3; + return; } - if (result.__isset.o4) { + if (result.__isset.o1) { sentry.commit(); - throw result.o4; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64651,21 +66291,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_create_function(const int32_t seq } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::drop_function(const std::string& dbName, const std::string& funcName) +bool ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) { - int32_t seqid = send_drop_function(dbName, funcName); - recv_drop_function(seqid); + int32_t seqid = send_create_role(role); + return recv_create_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::string& dbName, const std::string& funcName) +int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const 
Role& role) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_create_role_pargs args; + args.role = &role; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64676,7 +66315,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_function(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) { int32_t rseqid = 0; @@ -64705,7 +66344,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_function") != 0) { + if (fname.compare("create_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64714,21 +66353,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_drop_function_presult result; + bool _return; + ThriftHiveMetastore_create_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o3) { + if (result.__isset.o1) { sentry.commit(); - throw result.o3; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64738,22 +66379,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_drop_function(const int32_t seqid } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) { - int32_t seqid = send_alter_function(dbName, funcName, newFunc); - recv_alter_function(seqid); + int32_t seqid = send_drop_role(role_name); + return recv_drop_role(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) +int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("alter_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_alter_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; - args.newFunc = &newFunc; + ThriftHiveMetastore_drop_role_pargs args; + args.role_name = &role_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64764,7 +66403,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_alter_function(const std::stri return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) { int32_t rseqid = 0; @@ -64793,7 +66432,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if 
(fname.compare("alter_function") != 0) { + if (fname.compare("drop_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64802,21 +66441,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_alter_function_presult result; + bool _return; + ThriftHiveMetastore_drop_role_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { + if (result.__isset.success) { sentry.commit(); - throw result.o1; + return _return; } - if (result.__isset.o2) { + if (result.__isset.o1) { sentry.commit(); - throw result.o2; + throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -64826,21 +66467,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_alter_function(const int32_t seqi } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_functions(std::vector & _return, const std::string& dbName, const std::string& pattern) +void ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) { - int32_t seqid = send_get_functions(dbName, pattern); - recv_get_functions(_return, seqid); + int32_t seqid = send_get_role_names(); + recv_get_role_names(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::string& dbName, const std::string& pattern) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - 
oprot_->writeMessageBegin("get_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_functions_pargs args; - args.dbName = &dbName; - args.pattern = &pattern; + ThriftHiveMetastore_get_role_names_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64851,7 +66490,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_functions(const std::strin return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -64880,7 +66519,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_functions") != 0) { + if (fname.compare("get_role_names") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64889,7 +66528,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorreadMessageEnd(); @@ -64905,7 +66544,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -64915,21 +66554,25 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_functions(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_function", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_function_pargs args; - args.dbName = &dbName; - args.funcName = &funcName; + ThriftHiveMetastore_grant_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.grantor = &grantor; + 
args.grantorType = &grantorType; + args.grant_option = &grant_option; args.write(oprot_); oprot_->writeMessageEnd(); @@ -64940,7 +66583,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_function(const std::string return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) { int32_t rseqid = 0; @@ -64969,7 +66612,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_function") != 0) { + if (fname.compare("grant_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -64978,27 +66621,113 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_function_presult result; + bool _return; + ThriftHiveMetastore_grant_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - if (result.__isset.o2) { + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +bool ThriftHiveMetastoreConcurrentClient::revoke_role(const std::string& role_name, const std::string& principal_name, const 
PrincipalType::type principal_type) +{ + int32_t seqid = send_revoke_role(role_name, principal_name, principal_type); + return recv_revoke_role(seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_revoke_role_pargs args; + args.role_name = &role_name; + args.principal_name = &principal_name; + args.principal_type = &principal_type; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); sentry.commit(); - throw result.o2; + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("revoke_role") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad 
state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + bool _return; + ThriftHiveMetastore_revoke_role_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + sentry.commit(); + return _return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_function failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65008,19 +66737,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_function(Function& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_functions(GetAllFunctionsResponse& _return) +void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) { - int32_t seqid = send_get_all_functions(); - recv_get_all_functions(_return, seqid); + int32_t seqid = send_list_roles(principal_name, principal_type); + recv_list_roles(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() +int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const PrincipalType::type principal_type) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_functions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_functions_pargs args; + 
ThriftHiveMetastore_list_roles_pargs args; + args.principal_name = &principal_name; + args.principal_type = &principal_type; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65031,7 +66762,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_functions() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctionsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65060,7 +66791,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_functions") != 0) { + if (fname.compare("list_roles") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65069,7 +66800,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_functions_presult result; + ThriftHiveMetastore_list_roles_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -65085,7 +66816,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_functions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65095,20 +66826,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_functions(GetAllFunctions } // end while(true) } -bool 
ThriftHiveMetastoreConcurrentClient::create_role(const Role& role) +void ThriftHiveMetastoreConcurrentClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) { - int32_t seqid = send_create_role(role); - return recv_create_role(seqid); + int32_t seqid = send_grant_revoke_role(request); + recv_grant_revoke_role(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("create_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_create_role_pargs args; - args.role = &role; + ThriftHiveMetastore_grant_revoke_role_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65119,7 +66850,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_create_role(const Role& role) return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65148,7 +66879,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("create_role") != 0) { + if (fname.compare("grant_revoke_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65157,23 +66888,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
bool _return; - ThriftHiveMetastore_create_role_presult result; + ThriftHiveMetastore_grant_revoke_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "create_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65183,20 +66914,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_create_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::drop_role(const std::string& role_name) +void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) { - int32_t seqid = send_drop_role(role_name); - return recv_drop_role(seqid); + int32_t seqid = send_get_principals_in_role(request); + recv_get_principals_in_role(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& role_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("drop_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_drop_role_pargs args; - args.role_name = &role_name; + ThriftHiveMetastore_get_principals_in_role_pargs args; + 
args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65207,7 +66938,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_drop_role(const std::string& r return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65236,7 +66967,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("drop_role") != 0) { + if (fname.compare("get_principals_in_role") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65245,23 +66976,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_drop_role_presult result; + ThriftHiveMetastore_get_principals_in_role_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "drop_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65271,19 +67002,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_drop_role(const int32_t seqid) } // end while(true) } -void 
ThriftHiveMetastoreConcurrentClient::get_role_names(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) { - int32_t seqid = send_get_role_names(); - recv_get_role_names(_return, seqid); + int32_t seqid = send_get_role_grants_for_principal(request); + recv_get_role_grants_for_principal(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_names", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_names_pargs args; + ThriftHiveMetastore_get_role_grants_for_principal_pargs args; + args.request = &request; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65294,7 +67026,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_names() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65323,7 +67055,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_names") != 0) { + if (fname.compare("get_role_grants_for_principal") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65332,7 +67064,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorreadMessageEnd(); @@ -65348,100 +67080,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_names(std::vectorsync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -bool ThriftHiveMetastoreConcurrentClient::grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) -{ - int32_t seqid = send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option); - return recv_grant_role(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_role", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_grant_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.grantor = &grantor; - args.grantorType = &grantorType; - args.grant_option = &grant_option; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - 
::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("grant_role") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - bool _return; - ThriftHiveMetastore_grant_role_presult result; - result.success = &_return; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.success) { - sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65451,22 +67090,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_role(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +void 
ThriftHiveMetastoreConcurrentClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_revoke_role(role_name, principal_name, principal_type); - return recv_revoke_role(seqid); + int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); + recv_get_privilege_set(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_role_pargs args; - args.role_name = &role_name; - args.principal_name = &principal_name; - args.principal_type = &principal_type; + ThriftHiveMetastore_get_privilege_set_pargs args; + args.hiveObject = &hiveObject; + args.user_name = &user_name; + args.group_names = &group_names; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65477,7 +67116,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_role(const std::string& return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65506,7 +67145,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_role") != 0) { + if 
(fname.compare("get_privilege_set") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65515,23 +67154,23 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_revoke_role_presult result; + ThriftHiveMetastore_get_privilege_set_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65541,21 +67180,22 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_role(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_roles(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type) +void ThriftHiveMetastoreConcurrentClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { - int32_t seqid = send_list_roles(principal_name, principal_type); - recv_list_roles(_return, seqid); + int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); + recv_list_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& principal_name, const 
PrincipalType::type principal_type) +int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_roles", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_roles_pargs args; + ThriftHiveMetastore_list_privileges_pargs args; args.principal_name = &principal_name; args.principal_type = &principal_type; + args.hiveObject = &hiveObject; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65566,7 +67206,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_roles(const std::string& return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65595,7 +67235,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_roles") != 0) { + if (fname.compare("list_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65604,7 +67244,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_list_roles_presult result; + ThriftHiveMetastore_list_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -65620,7 +67260,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r throw result.o1; } // in a 
bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_roles failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "list_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65630,20 +67270,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_roles(std::vector & _r } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) +bool ThriftHiveMetastoreConcurrentClient::grant_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_grant_revoke_role(request); - recv_grant_revoke_role(_return, seqid); + int32_t seqid = send_grant_privileges(privileges); + return recv_grant_privileges(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantRevokeRoleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_role_pargs args; - args.request = &request; + ThriftHiveMetastore_grant_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65654,7 +67294,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_role(const GrantR return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRoleResponse& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ 
-65683,7 +67323,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_role") != 0) { + if (fname.compare("grant_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65692,23 +67332,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_role_presult result; + bool _return; + ThriftHiveMetastore_grant_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65718,20 +67358,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_role(GrantRevokeRole } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) +bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) { - int32_t seqid = send_get_principals_in_role(request); - recv_get_principals_in_role(_return, seqid); + int32_t seqid = send_revoke_privileges(privileges); + return recv_revoke_privileges(seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const GetPrincipalsInRoleRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_principals_in_role", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_principals_in_role_pargs args; - args.request = &request; + ThriftHiveMetastore_revoke_privileges_pargs args; + args.privileges = &privileges; args.write(oprot_); oprot_->writeMessageEnd(); @@ -65742,7 +67382,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_principals_in_role(const G return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincipalsInRoleResponse& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) { int32_t rseqid = 0; @@ -65771,7 +67411,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_principals_in_role") != 0) { + if (fname.compare("revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65780,23 +67420,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_principals_in_role_presult result; + bool _return; + ThriftHiveMetastore_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has 
now been filled sentry.commit(); - return; + return _return; } if (result.__isset.o1) { sentry.commit(); throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_principals_in_role failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65806,19 +67446,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_principals_in_role(GetPrincip } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) +void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) { - int32_t seqid = send_get_role_grants_for_principal(request); - recv_get_role_grants_for_principal(_return, seqid); + int32_t seqid = send_grant_revoke_privileges(request); + recv_grant_revoke_privileges(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal(const GetRoleGrantsForPrincipalRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_role_grants_for_principal", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_role_grants_for_principal_pargs args; + ThriftHiveMetastore_grant_revoke_privileges_pargs args; args.request = &request; args.write(oprot_); @@ -65830,7 +67470,7 @@ int32_t 
ThriftHiveMetastoreConcurrentClient::send_get_role_grants_for_principal( return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65859,7 +67499,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_role_grants_for_principal") != 0) { + if (fname.compare("grant_revoke_privileges") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65868,7 +67508,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_role_grants_for_principal_presult result; + ThriftHiveMetastore_grant_revoke_privileges_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -65884,7 +67524,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_role_grants_for_principal failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65894,20 +67534,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_role_grants_for_principal(Get } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& 
hiveObject, const std::string& user_name, const std::vector & group_names) +void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) { - int32_t seqid = send_get_privilege_set(hiveObject, user_name, group_names); - recv_get_privilege_set(_return, seqid); + int32_t seqid = send_set_ugi(user_name, group_names); + recv_set_ugi(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_privilege_set", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_privilege_set_pargs args; - args.hiveObject = &hiveObject; + ThriftHiveMetastore_set_ugi_pargs args; args.user_name = &user_name; args.group_names = &group_names; args.write(oprot_); @@ -65920,7 +67559,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_privilege_set(const HiveOb return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivilegeSet& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -65949,7 +67588,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_privilege_set") != 0) { + if (fname.compare("set_ugi") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -65958,7 +67597,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_privilege_set_presult result; + ThriftHiveMetastore_set_ugi_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -65974,7 +67613,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_privilege_set failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -65984,22 +67623,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_privilege_set(PrincipalPrivil } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::list_privileges(std::vector & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { - int32_t seqid = send_list_privileges(principal_name, principal_type, hiveObject); - recv_list_privileges(_return, seqid); + int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); + recv_get_delegation_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) { int32_t cseqid = 
this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("list_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_list_privileges_pargs args; - args.principal_name = &principal_name; - args.principal_type = &principal_type; - args.hiveObject = &hiveObject; + ThriftHiveMetastore_get_delegation_token_pargs args; + args.token_owner = &token_owner; + args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66010,7 +67648,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_list_privileges(const std::str return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66039,7 +67677,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("list_privileges") != 0) { + if (fname.compare("get_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66048,7 +67686,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorreadMessageEnd(); @@ -66064,7 +67702,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorsync_.updatePending(fname, mtype, rseqid); @@ -66074,20 +67712,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_list_privileges(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("renew_delegation_token", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_renew_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66098,7 +67736,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_privileges(const Privile return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t seqid) +int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) { int32_t rseqid = 0; @@ -66127,7 +67765,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_privileges") != 0) { + if (fname.compare("renew_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66136,8 +67774,8 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_grant_privileges_presult result; + int64_t _return; + ThriftHiveMetastore_renew_delegation_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66152,7 +67790,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se throw result.o1; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66162,20 +67800,20 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_grant_privileges(const int32_t se } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::revoke_privileges(const PrivilegeBag& privileges) +void ThriftHiveMetastoreConcurrentClient::cancel_delegation_token(const std::string& token_str_form) { - int32_t seqid = send_revoke_privileges(privileges); - return recv_revoke_privileges(seqid); + int32_t seqid = send_cancel_delegation_token(token_str_form); + recv_cancel_delegation_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const PrivilegeBag& privileges) +int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const std::string& token_str_form) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_revoke_privileges_pargs args; - args.privileges = &privileges; + ThriftHiveMetastore_cancel_delegation_token_pargs args; + args.token_str_form = &token_str_form; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66186,7 +67824,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_revoke_privileges(const Privil return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) { int32_t rseqid = 0; @@ -66215,7 +67853,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("revoke_privileges") != 0) { + if (fname.compare("cancel_delegation_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66224,23 +67862,17 @@ bool 
ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_revoke_privileges_presult result; - result.success = &_return; + ThriftHiveMetastore_cancel_delegation_token_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - sentry.commit(); - return _return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "revoke_privileges failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66250,20 +67882,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_revoke_privileges(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) +bool ThriftHiveMetastoreConcurrentClient::add_token(const std::string& token_identifier, const std::string& delegation_token) { - int32_t seqid = send_grant_revoke_privileges(request); - recv_grant_revoke_privileges(_return, seqid); + int32_t seqid = send_add_token(token_identifier, delegation_token); + return recv_add_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const GrantRevokePrivilegeRequest& request) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("grant_revoke_privileges", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_token", 
::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_grant_revoke_privileges_pargs args; - args.request = &request; + ThriftHiveMetastore_add_token_pargs args; + args.token_identifier = &token_identifier; + args.delegation_token = &delegation_token; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66274,7 +67907,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_grant_revoke_privileges(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) { int32_t rseqid = 0; @@ -66303,7 +67936,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("grant_revoke_privileges") != 0) { + if (fname.compare("add_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66312,23 +67945,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_grant_revoke_privileges_presult result; + bool _return; + ThriftHiveMetastore_add_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } - if (result.__isset.o1) { sentry.commit(); - throw result.o1; + return _return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "grant_revoke_privileges failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: 
unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66338,21 +67967,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_grant_revoke_privileges(GrantRevo } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::set_ugi(std::vector & _return, const std::string& user_name, const std::vector & group_names) +bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) { - int32_t seqid = send_set_ugi(user_name, group_names); - recv_set_ugi(_return, seqid); + int32_t seqid = send_remove_token(token_identifier); + return recv_remove_token(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& user_name, const std::vector & group_names) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("set_ugi", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_set_ugi_pargs args; - args.user_name = &user_name; - args.group_names = &group_names; + ThriftHiveMetastore_remove_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66363,7 +67991,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_set_ugi(const std::string& use return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector & _return, const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) { int32_t rseqid = 0; @@ -66392,7 +68020,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("set_ugi") != 0) { + if (fname.compare("remove_token") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66401,23 +68029,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_set_ugi_presult result; + bool _return; + ThriftHiveMetastore_remove_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { - // _return pointer has now been filled sentry.commit(); - return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; + return _return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_ugi failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66427,21 +68051,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_set_ugi(std::vector } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) { - int32_t seqid = send_get_delegation_token(token_owner, renewer_kerberos_principal_name); - recv_get_delegation_token(_return, seqid); + int32_t seqid = send_get_token(token_identifier); + recv_get_token(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std::string& token_owner, const std::string& renewer_kerberos_principal_name) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) { 
int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_delegation_token_pargs args; - args.token_owner = &token_owner; - args.renewer_kerberos_principal_name = &renewer_kerberos_principal_name; + ThriftHiveMetastore_get_token_pargs args; + args.token_identifier = &token_identifier; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66452,7 +68075,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_delegation_token(const std return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66481,7 +68104,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_delegation_token") != 0) { + if (fname.compare("get_token") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66490,7 +68113,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_delegation_token_presult result; + ThriftHiveMetastore_get_token_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66501,12 +68124,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } // in a bad state, don't commit - throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66516,20 +68135,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_delegation_token(std::string& } // end while(true) } -int64_t ThriftHiveMetastoreConcurrentClient::renew_delegation_token(const std::string& token_str_form) +void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & _return) { - int32_t seqid = send_renew_delegation_token(token_str_form); - return recv_renew_delegation_token(seqid); + int32_t seqid = send_get_all_token_identifiers(); + recv_get_all_token_identifiers(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("renew_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_renew_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_get_all_token_identifiers_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66540,7 +68158,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_renew_delegation_token(const s return cseqid; } -int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66569,7 +68187,7 @@ int64_t 
ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("renew_delegation_token") != 0) { + if (fname.compare("get_all_token_identifiers") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66578,23 +68196,19 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int64_t _return; - ThriftHiveMetastore_renew_delegation_token_presult result; + ThriftHiveMetastore_get_all_token_identifiers_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "renew_delegation_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66604,20 +68218,20 @@ int64_t ThriftHiveMetastoreConcurrentClient::recv_renew_delegation_token(const i } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cancel_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) { - int32_t seqid = send_cancel_delegation_token(token_str_form); - recv_cancel_delegation_token(seqid); + int32_t seqid = send_add_master_key(key); + return recv_add_master_key(seqid); } -int32_t 
ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const std::string& token_str_form) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cancel_delegation_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cancel_delegation_token_pargs args; - args.token_str_form = &token_str_form; + ThriftHiveMetastore_add_master_key_pargs args; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66628,7 +68242,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cancel_delegation_token(const return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int32_t seqid) +int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -66657,7 +68271,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cancel_delegation_token") != 0) { + if (fname.compare("add_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66666,17 +68280,23 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cancel_delegation_token_presult result; + int32_t _return; + ThriftHiveMetastore_add_master_key_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + sentry.commit(); + return _return; + } if (result.__isset.o1) { 
sentry.commit(); throw result.o1; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66686,21 +68306,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_cancel_delegation_token(const int } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::add_token(const std::string& token_identifier, const std::string& delegation_token) +void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) { - int32_t seqid = send_add_token(token_identifier, delegation_token); - return recv_add_token(seqid); + int32_t seqid = send_update_master_key(seq_number, key); + recv_update_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& token_identifier, const std::string& delegation_token) +int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_token_pargs args; - args.token_identifier = &token_identifier; - args.delegation_token = &delegation_token; + ThriftHiveMetastore_update_master_key_pargs args; + args.seq_number = &seq_number; + args.key = &key; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66711,7 +68331,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_token(const std::string& t return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t 
seqid) { int32_t rseqid = 0; @@ -66740,7 +68360,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_token") != 0) { + if (fname.compare("update_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66749,19 +68369,21 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_add_token_presult result; - result.success = &_return; + ThriftHiveMetastore_update_master_key_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { + if (result.__isset.o1) { sentry.commit(); - return _return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_token failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66771,20 +68393,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_add_token(const int32_t seqid) } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_token(const std::string& token_identifier) +bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) { - int32_t seqid = send_remove_token(token_identifier); - return recv_remove_token(seqid); + int32_t seqid = send_remove_master_key(key_seq); + return recv_remove_master_key(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) { int32_t 
cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_remove_master_key_pargs args; + args.key_seq = &key_seq; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66795,7 +68417,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_token(const std::string return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) +bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) { int32_t rseqid = 0; @@ -66824,7 +68446,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_token") != 0) { + if (fname.compare("remove_master_key") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66834,7 +68456,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) throw TProtocolException(TProtocolException::INVALID_DATA); } bool _return; - ThriftHiveMetastore_remove_token_presult result; + ThriftHiveMetastore_remove_master_key_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66845,7 +68467,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) return _return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown 
result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66855,20 +68477,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_token(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_token(std::string& _return, const std::string& token_identifier) +void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) { - int32_t seqid = send_get_token(token_identifier); - recv_get_token(_return, seqid); + int32_t seqid = send_get_master_keys(); + recv_get_master_keys(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& token_identifier) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_token", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_token_pargs args; - args.token_identifier = &token_identifier; + ThriftHiveMetastore_get_master_keys_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66879,7 +68500,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_token(const std::string& t return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66908,7 +68529,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_token") != 0) { + if (fname.compare("get_master_keys") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -66917,7 +68538,7 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_token_presult result; + ThriftHiveMetastore_get_master_keys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -66929,7 +68550,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_token failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -66939,19 +68560,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_token(std::string& _return, c } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_all_token_identifiers(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::get_open_txns(GetOpenTxnsResponse& _return) { - int32_t seqid = send_get_all_token_identifiers(); - recv_get_all_token_identifiers(_return, seqid); + int32_t seqid = send_get_open_txns(); + recv_get_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_all_token_identifiers", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_all_token_identifiers_pargs args; + ThriftHiveMetastore_get_open_txns_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -66962,7 +68583,7 
@@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_all_token_identifiers() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -66991,7 +68612,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_all_token_identifiers") != 0) { + if (fname.compare("get_open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67000,7 +68621,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_all_token_identifiers_presult result; + ThriftHiveMetastore_get_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67012,7 +68633,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_all_token_identifiers failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67022,20 +68643,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_all_token_identifiers(std::ve } // end while(true) } -int32_t ThriftHiveMetastoreConcurrentClient::add_master_key(const std::string& key) +void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) { - int32_t seqid = 
send_add_master_key(key); - return recv_add_master_key(seqid); + int32_t seqid = send_get_open_txns_info(); + recv_get_open_txns_info(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::string& key) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_master_key_pargs args; - args.key = &key; + ThriftHiveMetastore_get_open_txns_info_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67046,7 +68666,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_master_key(const std::stri return cseqid; } -int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67075,7 +68695,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_master_key") != 0) { + if (fname.compare("get_open_txns_info") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67084,110 +68704,19 @@ int32_t ThriftHiveMetastoreConcurrentClient::recv_add_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - int32_t _return; - ThriftHiveMetastore_add_master_key_presult result; + ThriftHiveMetastore_get_open_txns_info_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if 
(result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; - } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "add_master_key failed: unknown result"); - } - // seqid != rseqid - this->sync_.updatePending(fname, mtype, rseqid); - - // this will temporarily unlock the readMutex, and let other clients get work done - this->sync_.waitForWork(seqid); - } // end while(true) -} - -void ThriftHiveMetastoreConcurrentClient::update_master_key(const int32_t seq_number, const std::string& key) -{ - int32_t seqid = send_update_master_key(seq_number, key); - recv_update_master_key(seqid); -} - -int32_t ThriftHiveMetastoreConcurrentClient::send_update_master_key(const int32_t seq_number, const std::string& key) -{ - int32_t cseqid = this->sync_.generateSeqId(); - ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("update_master_key", ::apache::thrift::protocol::T_CALL, cseqid); - - ThriftHiveMetastore_update_master_key_pargs args; - args.seq_number = &seq_number; - args.key = &key; - args.write(oprot_); - - oprot_->writeMessageEnd(); - oprot_->getTransport()->writeEnd(); - oprot_->getTransport()->flush(); - - sentry.commit(); - return cseqid; -} - -void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t seqid) -{ - - int32_t rseqid = 0; - std::string fname; - ::apache::thrift::protocol::TMessageType mtype; - - // the read mutex gets dropped and reacquired as part of waitForWork() - // The destructor of this sentry wakes up other clients - ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); - - while(true) { - if(!this->sync_.getPending(fname, mtype, rseqid)) { - iprot_->readMessageBegin(fname, mtype, rseqid); - } - if(seqid == rseqid) { - if (mtype == 
::apache::thrift::protocol::T_EXCEPTION) { - ::apache::thrift::TApplicationException x; - x.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - sentry.commit(); - throw x; - } - if (mtype != ::apache::thrift::protocol::T_REPLY) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - } - if (fname.compare("update_master_key") != 0) { - iprot_->skip(::apache::thrift::protocol::T_STRUCT); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - // in a bad state, don't commit - using ::apache::thrift::protocol::TProtocolException; - throw TProtocolException(TProtocolException::INVALID_DATA); - } - ThriftHiveMetastore_update_master_key_presult result; - result.read(iprot_); - iprot_->readMessageEnd(); - iprot_->getTransport()->readEnd(); - - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - sentry.commit(); - return; + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67197,20 +68726,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_update_master_key(const int32_t s } // end while(true) } -bool ThriftHiveMetastoreConcurrentClient::remove_master_key(const int32_t key_seq) +void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) { - int32_t seqid = send_remove_master_key(key_seq); - return recv_remove_master_key(seqid); + int32_t seqid = send_open_txns(rqst); + recv_open_txns(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_t key_seq) +int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("remove_master_key", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_remove_master_key_pargs args; - args.key_seq = &key_seq; + ThriftHiveMetastore_open_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67221,7 +68750,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_remove_master_key(const int32_ return cseqid; } -bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67250,7 +68779,7 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("remove_master_key") != 0) { + if (fname.compare("open_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67259,19 +68788,19 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - bool _return; - ThriftHiveMetastore_remove_master_key_presult result; + ThriftHiveMetastore_open_txns_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - return _return; + return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "remove_master_key failed: unknown result"); + throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67281,19 +68810,20 @@ bool ThriftHiveMetastoreConcurrentClient::recv_remove_master_key(const int32_t s } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_master_keys(std::vector & _return) +void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) { - int32_t seqid = send_get_master_keys(); - recv_get_master_keys(_return, seqid); + int32_t seqid = send_abort_txn(rqst); + recv_abort_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() +int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_master_keys", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_master_keys_pargs args; + ThriftHiveMetastore_abort_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67304,7 +68834,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_master_keys() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vector & _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -67333,7 +68863,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_master_keys") != 0) { + if (fname.compare("abort_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67342,19 +68872,17 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorreadMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_master_keys failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67364,19 +68892,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_master_keys(std::vectorsync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_pargs args; + ThriftHiveMetastore_abort_txns_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67387,7 +68916,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) { int32_t rseqid = 0; @@ -67416,7 +68945,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns") != 0) { + if (fname.compare("abort_txns") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67425,19 +68954,17 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - 
ThriftHiveMetastore_get_open_txns_presult result; - result.success = &_return; + ThriftHiveMetastore_abort_txns_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns failed: unknown result"); + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67447,19 +68974,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns(GetOpenTxnsResponse } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_open_txns_info(GetOpenTxnsInfoResponse& _return) +void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) { - int32_t seqid = send_get_open_txns_info(); - recv_get_open_txns_info(_return, seqid); + int32_t seqid = send_commit_txn(rqst); + recv_commit_txn(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() +int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_open_txns_info", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_open_txns_info_pargs args; + ThriftHiveMetastore_commit_txn_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67470,7 +68998,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_open_txns_info() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInfoResponse& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) { int32_t rseqid = 0; @@ -67499,7 +69027,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_open_txns_info") != 0) { + if (fname.compare("commit_txn") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67508,19 +69036,21 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_open_txns_info_presult result; - result.success = &_return; + ThriftHiveMetastore_commit_txn_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled + if (result.__isset.o1) { sentry.commit(); - return; + throw result.o1; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_open_txns_info failed: unknown result"); + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67530,19 +69060,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_open_txns_info(GetOpenTxnsInf } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) { - int32_t seqid = send_open_txns(rqst); - recv_open_txns(_return, seqid); + int32_t seqid = send_lock(rqst); + recv_lock(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest& rqst) +int32_t 
ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("open_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_open_txns_pargs args; + ThriftHiveMetastore_lock_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -67554,7 +69084,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_open_txns(const OpenTxnRequest return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67583,7 +69113,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("open_txns") != 0) { + if (fname.compare("lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67592,7 +69122,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_open_txns_presult result; + ThriftHiveMetastore_lock_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -67603,8 +69133,16 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu sentry.commit(); return; } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "open_txns failed: 
unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67614,19 +69152,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_open_txns(OpenTxnsResponse& _retu } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txn(const AbortTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) { - int32_t seqid = send_abort_txn(rqst); - recv_abort_txn(seqid); + int32_t seqid = send_check_lock(rqst); + recv_check_lock(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txn_pargs args; + ThriftHiveMetastore_check_lock_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -67638,7 +69176,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txn(const AbortTxnReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67667,7 +69205,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txn") != 0) { + if (fname.compare("check_lock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67676,17 +69214,31 
@@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txn_presult result; + ThriftHiveMetastore_check_lock_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } if (result.__isset.o1) { sentry.commit(); throw result.o1; } - sentry.commit(); - return; + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67696,19 +69248,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::abort_txns(const AbortTxnsRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) { - int32_t seqid = send_abort_txns(rqst); - recv_abort_txns(seqid); + int32_t seqid = send_unlock(rqst); + recv_unlock(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("abort_txns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_abort_txns_pargs args; + ThriftHiveMetastore_unlock_pargs args; args.rqst = &rqst; args.write(oprot_); @@ 
-67720,7 +69272,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_abort_txns(const AbortTxnsRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) { int32_t rseqid = 0; @@ -67749,7 +69301,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("abort_txns") != 0) { + if (fname.compare("unlock") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67758,7 +69310,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_abort_txns_presult result; + ThriftHiveMetastore_unlock_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67767,6 +69319,10 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) sentry.commit(); throw result.o1; } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } sentry.commit(); return; } @@ -67778,19 +69334,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_abort_txns(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::commit_txn(const CommitTxnRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) { - int32_t seqid = send_commit_txn(rqst); - recv_commit_txn(seqid); + int32_t seqid = send_show_locks(rqst); + recv_show_locks(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); 
::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("commit_txn", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_commit_txn_pargs args; + ThriftHiveMetastore_show_locks_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -67802,7 +69358,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_commit_txn(const CommitTxnRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -67831,7 +69387,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("commit_txn") != 0) { + if (fname.compare("show_locks") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67840,21 +69396,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_commit_txn_presult result; + ThriftHiveMetastore_show_locks_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { + if (result.__isset.success) { + // _return pointer has now been filled sentry.commit(); - throw result.o2; + return; } - sentry.commit(); - return; + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ 
-67864,20 +69418,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_commit_txn(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::lock(LockResponse& _return, const LockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) { - int32_t seqid = send_lock(rqst); - recv_lock(_return, seqid); + int32_t seqid = send_heartbeat(ids); + recv_heartbeat(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_lock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_pargs args; + args.ids = &ids; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67888,7 +69442,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_lock(const LockRequest& rqst) return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) { int32_t rseqid = 0; @@ -67917,7 +69471,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("lock") != 0) { + if (fname.compare("heartbeat") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -67926,17 +69480,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_lock_presult 
result; - result.success = &_return; + ThriftHiveMetastore_heartbeat_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.success) { - // _return pointer has now been filled - sentry.commit(); - return; - } if (result.__isset.o1) { sentry.commit(); throw result.o1; @@ -67945,8 +69493,12 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const sentry.commit(); throw result.o2; } - // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "lock failed: unknown result"); + if (result.__isset.o3) { + sentry.commit(); + throw result.o3; + } + sentry.commit(); + return; } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -67956,20 +69508,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_lock(LockResponse& _return, const } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::check_lock(LockResponse& _return, const CheckLockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) { - int32_t seqid = send_check_lock(rqst); - recv_check_lock(_return, seqid); + int32_t seqid = send_heartbeat_txn_range(txns); + recv_heartbeat_txn_range(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("check_lock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_check_lock_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_heartbeat_txn_range_pargs args; + args.txns = 
&txns; args.write(oprot_); oprot_->writeMessageEnd(); @@ -67980,7 +69532,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_check_lock(const CheckLockRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68009,7 +69561,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("check_lock") != 0) { + if (fname.compare("heartbeat_txn_range") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68018,7 +69570,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_check_lock_presult result; + ThriftHiveMetastore_heartbeat_txn_range_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68029,20 +69581,8 @@ void ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, sentry.commit(); return; } - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "check_lock failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68052,19 +69592,19 @@ void 
ThriftHiveMetastoreConcurrentClient::recv_check_lock(LockResponse& _return, } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::unlock(const UnlockRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) { - int32_t seqid = send_unlock(rqst); - recv_unlock(seqid); + int32_t seqid = send_compact(rqst); + recv_compact(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("unlock", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_unlock_pargs args; + ThriftHiveMetastore_compact_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -68076,7 +69616,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_unlock(const UnlockRequest& rq return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) { int32_t rseqid = 0; @@ -68105,7 +69645,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("unlock") != 0) { + if (fname.compare("compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68114,19 +69654,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_unlock_presult result; + ThriftHiveMetastore_compact_presult result; result.read(iprot_); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } sentry.commit(); return; } @@ -68138,19 +69670,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_unlock(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) { - int32_t seqid = send_show_locks(rqst); - recv_show_locks(_return, seqid); + int32_t seqid = send_show_compact(rqst); + recv_show_compact(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompactRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_locks", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_locks_pargs args; + ThriftHiveMetastore_show_compact_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -68162,7 +69694,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_locks(const ShowLocksRequ return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68191,7 +69723,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_locks") != 0) { + if (fname.compare("show_compact") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); 
iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68200,7 +69732,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_locks_presult result; + ThriftHiveMetastore_show_compact_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68212,7 +69744,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_locks failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68222,20 +69754,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_locks(ShowLocksResponse& _re } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat(const HeartbeatRequest& ids) +void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) { - int32_t seqid = send_heartbeat(ids); - recv_heartbeat(seqid); + int32_t seqid = send_add_dynamic_partitions(rqst); + recv_add_dynamic_partitions(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatRequest& ids) +int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_pargs args; - args.ids = &ids; + 
ThriftHiveMetastore_add_dynamic_partitions_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68246,7 +69778,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat(const HeartbeatReque return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) { int32_t rseqid = 0; @@ -68275,7 +69807,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat") != 0) { + if (fname.compare("add_dynamic_partitions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68284,7 +69816,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_presult result; + ThriftHiveMetastore_add_dynamic_partitions_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68297,10 +69829,6 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) sentry.commit(); throw result.o2; } - if (result.__isset.o3) { - sentry.commit(); - throw result.o3; - } sentry.commit(); return; } @@ -68312,20 +69840,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) +void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) { - int32_t seqid = send_heartbeat_txn_range(txns); - recv_heartbeat_txn_range(_return, seqid); + int32_t seqid = 
send_get_next_notification(rqst); + recv_get_next_notification(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const HeartbeatTxnRangeRequest& txns) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("heartbeat_txn_range", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_heartbeat_txn_range_pargs args; - args.txns = &txns; + ThriftHiveMetastore_get_next_notification_pargs args; + args.rqst = &rqst; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68336,7 +69864,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_txn_range(const Hear return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68365,7 +69893,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("heartbeat_txn_range") != 0) { + if (fname.compare("get_next_notification") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68374,7 +69902,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_heartbeat_txn_range_presult result; + ThriftHiveMetastore_get_next_notification_presult result; result.success = &_return; result.read(iprot_); 
iprot_->readMessageEnd(); @@ -68386,7 +69914,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_txn_range failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68396,20 +69924,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_txn_range(HeartbeatTxnR } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::compact(const CompactionRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) { - int32_t seqid = send_compact(rqst); - recv_compact(seqid); + int32_t seqid = send_get_current_notificationEventId(); + recv_get_current_notificationEventId(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_compact_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_current_notificationEventId_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68420,7 +69947,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_compact(const CompactionReques return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68449,7 +69976,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("compact") != 0) { + if (fname.compare("get_current_notificationEventId") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68458,13 +69985,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_compact_presult result; + ThriftHiveMetastore_get_current_notificationEventId_presult result; + result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - sentry.commit(); - return; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68474,19 +70007,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_compact(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) { - int32_t seqid = send_show_compact(rqst); - recv_show_compact(_return, seqid); + int32_t seqid = send_fire_listener_event(rqst); + recv_fire_listener_event(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const 
ShowCompactRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("show_compact", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_show_compact_pargs args; + ThriftHiveMetastore_fire_listener_event_pargs args; args.rqst = &rqst; args.write(oprot_); @@ -68498,7 +70031,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_show_compact(const ShowCompact return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68527,7 +70060,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("show_compact") != 0) { + if (fname.compare("fire_listener_event") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68536,7 +70069,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_show_compact_presult result; + ThriftHiveMetastore_fire_listener_event_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68548,7 +70081,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "show_compact failed: 
unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68558,20 +70091,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_show_compact(ShowCompactResponse& } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::add_dynamic_partitions(const AddDynamicPartitions& rqst) +void ThriftHiveMetastoreConcurrentClient::flushCache() { - int32_t seqid = send_add_dynamic_partitions(rqst); - recv_add_dynamic_partitions(seqid); + int32_t seqid = send_flushCache(); + recv_flushCache(seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const AddDynamicPartitions& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("add_dynamic_partitions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_add_dynamic_partitions_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_flushCache_pargs args; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68582,7 +70114,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_add_dynamic_partitions(const A return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) { int32_t rseqid = 0; @@ -68611,7 +70143,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("add_dynamic_partitions") != 0) { + if (fname.compare("flushCache") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); 
iprot_->getTransport()->readEnd(); @@ -68620,19 +70152,11 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_add_dynamic_partitions_presult result; + ThriftHiveMetastore_flushCache_presult result; result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - if (result.__isset.o1) { - sentry.commit(); - throw result.o1; - } - if (result.__isset.o2) { - sentry.commit(); - throw result.o2; - } sentry.commit(); return; } @@ -68644,20 +70168,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_add_dynamic_partitions(const int3 } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) { - int32_t seqid = send_get_next_notification(rqst); - recv_get_next_notification(_return, seqid); + int32_t seqid = send_get_file_metadata_by_expr(req); + recv_get_file_metadata_by_expr(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const NotificationEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_next_notification", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_next_notification_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68668,7 
+70192,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_notification(const No return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(NotificationEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68697,7 +70221,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_next_notification") != 0) { + if (fname.compare("get_file_metadata_by_expr") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68706,7 +70230,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_next_notification_presult result; + ThriftHiveMetastore_get_file_metadata_by_expr_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68718,7 +70242,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_notification failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68728,19 +70252,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_next_notification(Notificatio } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_current_notificationEventId(CurrentNotificationEventId& _return) +void 
ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) { - int32_t seqid = send_get_current_notificationEventId(); - recv_get_current_notificationEventId(_return, seqid); + int32_t seqid = send_get_file_metadata(req); + recv_get_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventId() +int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_current_notificationEventId", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_current_notificationEventId_pargs args; + ThriftHiveMetastore_get_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68751,7 +70276,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_current_notificationEventI return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(CurrentNotificationEventId& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68780,7 +70305,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_current_notificationEventId") != 0) { + if (fname.compare("get_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68789,7 +70314,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C using ::apache::thrift::protocol::TProtocolException; throw 
TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_current_notificationEventId_presult result; + ThriftHiveMetastore_get_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68801,7 +70326,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_current_notificationEventId failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68811,20 +70336,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_current_notificationEventId(C } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) +void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) { - int32_t seqid = send_fire_listener_event(rqst); - recv_fire_listener_event(_return, seqid); + int32_t seqid = send_put_file_metadata(req); + recv_put_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const FireEventRequest& rqst) +int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("fire_listener_event", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_fire_listener_event_pargs args; - args.rqst = &rqst; + ThriftHiveMetastore_put_file_metadata_pargs args; + 
args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68835,7 +70360,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_fire_listener_event(const Fire return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResponse& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68864,7 +70389,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("fire_listener_event") != 0) { + if (fname.compare("put_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68873,7 +70398,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_fire_listener_event_presult result; + ThriftHiveMetastore_put_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -68885,7 +70410,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fire_listener_event failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68895,19 +70420,20 @@ void ThriftHiveMetastoreConcurrentClient::recv_fire_listener_event(FireEventResp } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::flushCache() +void 
ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) { - int32_t seqid = send_flushCache(); - recv_flushCache(seqid); + int32_t seqid = send_clear_file_metadata(req); + recv_clear_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() +int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("flushCache", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_flushCache_pargs args; + ThriftHiveMetastore_clear_file_metadata_pargs args; + args.req = &req; args.write(oprot_); oprot_->writeMessageEnd(); @@ -68918,7 +70444,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_flushCache() return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -68947,7 +70473,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("flushCache") != 0) { + if (fname.compare("clear_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -68956,13 +70482,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_flushCache_presult result; + ThriftHiveMetastore_clear_file_metadata_presult result; + result.success = &_return; 
result.read(iprot_); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); - sentry.commit(); - return; + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -68972,19 +70504,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_flushCache(const int32_t seqid) } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) +void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) { - int32_t seqid = send_get_file_metadata_by_expr(req); - recv_get_file_metadata_by_expr(_return, seqid); + int32_t seqid = send_cache_file_metadata(req); + recv_cache_file_metadata(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(const GetFileMetadataByExprRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata_by_expr", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_by_expr_pargs args; + ThriftHiveMetastore_cache_file_metadata_pargs args; args.req = &req; args.write(oprot_); @@ -68996,7 +70528,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata_by_expr(cons return cseqid; } -void 
ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69025,7 +70557,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata_by_expr") != 0) { + if (fname.compare("cache_file_metadata") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69034,7 +70566,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_by_expr_presult result; + ThriftHiveMetastore_cache_file_metadata_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69046,7 +70578,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata_by_expr failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69056,19 +70588,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata_by_expr(GetFile } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { - 
int32_t seqid = send_get_file_metadata(req); - recv_get_file_metadata(_return, seqid); + int32_t seqid = send_get_next_write_id(req); + recv_get_next_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_next_write_id(const GetNextWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("get_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_next_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_get_file_metadata_pargs args; + ThriftHiveMetastore_get_next_write_id_pargs args; args.req = &req; args.write(oprot_); @@ -69080,7 +70612,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_get_file_metadata(const GetFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_get_next_write_id(GetNextWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69109,7 +70641,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("get_file_metadata") != 0) { + if (fname.compare("get_next_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69118,7 +70650,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_get_file_metadata_presult result; + ThriftHiveMetastore_get_next_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); 
@@ -69130,7 +70662,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_next_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69140,19 +70672,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_file_metadata(GetFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { - int32_t seqid = send_put_file_metadata(req); - recv_put_file_metadata(_return, seqid); + int32_t seqid = send_finalize_write_id(req); + recv_finalize_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_finalize_write_id(const FinalizeWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("put_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("finalize_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_put_file_metadata_pargs args; + ThriftHiveMetastore_finalize_write_id_pargs args; args.req = &req; args.write(oprot_); @@ -69164,7 +70696,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_put_file_metadata(const PutFil return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadataResult& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_finalize_write_id(FinalizeWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69193,7 +70725,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("put_file_metadata") != 0) { + if (fname.compare("finalize_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69202,7 +70734,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_put_file_metadata_presult result; + ThriftHiveMetastore_finalize_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69214,7 +70746,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "put_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "finalize_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69224,19 +70756,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_put_file_metadata(PutFileMetadata } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { - int32_t seqid = send_clear_file_metadata(req); - recv_clear_file_metadata(_return, seqid); + int32_t seqid = send_heartbeat_write_id(req); + 
recv_heartbeat_write_id(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const ClearFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_heartbeat_write_id(const HeartbeatWriteIdRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("clear_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("heartbeat_write_id", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_clear_file_metadata_pargs args; + ThriftHiveMetastore_heartbeat_write_id_pargs args; args.req = &req; args.write(oprot_); @@ -69248,7 +70780,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_clear_file_metadata(const Clea return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMetadataResult& _return, const int32_t seqid) +void ThriftHiveMetastoreConcurrentClient::recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69277,7 +70809,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("clear_file_metadata") != 0) { + if (fname.compare("heartbeat_write_id") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69286,7 +70818,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_clear_file_metadata_presult result; + ThriftHiveMetastore_heartbeat_write_id_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69298,7 +70830,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta 
return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "clear_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "heartbeat_write_id failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); @@ -69308,19 +70840,19 @@ void ThriftHiveMetastoreConcurrentClient::recv_clear_file_metadata(ClearFileMeta } // end while(true) } -void ThriftHiveMetastoreConcurrentClient::cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) +void ThriftHiveMetastoreConcurrentClient::get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) { - int32_t seqid = send_cache_file_metadata(req); - recv_cache_file_metadata(_return, seqid); + int32_t seqid = send_get_valid_write_ids(req); + recv_get_valid_write_ids(_return, seqid); } -int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const CacheFileMetadataRequest& req) +int32_t ThriftHiveMetastoreConcurrentClient::send_get_valid_write_ids(const GetValidWriteIdsRequest& req) { int32_t cseqid = this->sync_.generateSeqId(); ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); - oprot_->writeMessageBegin("cache_file_metadata", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("get_valid_write_ids", ::apache::thrift::protocol::T_CALL, cseqid); - ThriftHiveMetastore_cache_file_metadata_pargs args; + ThriftHiveMetastore_get_valid_write_ids_pargs args; args.req = &req; args.write(oprot_); @@ -69332,7 +70864,7 @@ int32_t ThriftHiveMetastoreConcurrentClient::send_cache_file_metadata(const Cach return cseqid; } -void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid) +void 
ThriftHiveMetastoreConcurrentClient::recv_get_valid_write_ids(GetValidWriteIdsResult& _return, const int32_t seqid) { int32_t rseqid = 0; @@ -69361,7 +70893,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("cache_file_metadata") != 0) { + if (fname.compare("get_valid_write_ids") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); @@ -69370,7 +70902,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta using ::apache::thrift::protocol::TProtocolException; throw TProtocolException(TProtocolException::INVALID_DATA); } - ThriftHiveMetastore_cache_file_metadata_presult result; + ThriftHiveMetastore_get_valid_write_ids_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -69382,7 +70914,7 @@ void ThriftHiveMetastoreConcurrentClient::recv_cache_file_metadata(CacheFileMeta return; } // in a bad state, don't commit - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "cache_file_metadata failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_valid_write_ids failed: unknown result"); } // seqid != rseqid this->sync_.updatePending(fname, mtype, rseqid); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 525408bafd24..6a3a0e2e52b6 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -172,6 +172,10 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) = 0; virtual void 
clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0; virtual void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) = 0; + virtual void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) = 0; + virtual void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) = 0; + virtual void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) = 0; + virtual void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) = 0; }; class ThriftHiveMetastoreIfFactory : virtual public ::facebook::fb303::FacebookServiceIfFactory { @@ -679,6 +683,18 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void cache_file_metadata(CacheFileMetadataResult& /* _return */, const CacheFileMetadataRequest& /* req */) { return; } + void get_next_write_id(GetNextWriteIdResult& /* _return */, const GetNextWriteIdRequest& /* req */) { + return; + } + void finalize_write_id(FinalizeWriteIdResult& /* _return */, const FinalizeWriteIdRequest& /* req */) { + return; + } + void heartbeat_write_id(HeartbeatWriteIdResult& /* _return */, const HeartbeatWriteIdRequest& /* req */) { + return; + } + void get_valid_write_ids(GetValidWriteIdsResult& /* _return */, const GetValidWriteIdsRequest& /* req */) { + return; + } }; typedef struct _ThriftHiveMetastore_getMetaConf_args__isset { @@ -19261,6 +19277,422 @@ class ThriftHiveMetastore_cache_file_metadata_presult { }; +typedef struct _ThriftHiveMetastore_get_next_write_id_args__isset { + _ThriftHiveMetastore_get_next_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_next_write_id_args__isset; + +class ThriftHiveMetastore_get_next_write_id_args { + public: + + ThriftHiveMetastore_get_next_write_id_args(const ThriftHiveMetastore_get_next_write_id_args&); + 
ThriftHiveMetastore_get_next_write_id_args& operator=(const ThriftHiveMetastore_get_next_write_id_args&); + ThriftHiveMetastore_get_next_write_id_args() { + } + + virtual ~ThriftHiveMetastore_get_next_write_id_args() throw(); + GetNextWriteIdRequest req; + + _ThriftHiveMetastore_get_next_write_id_args__isset __isset; + + void __set_req(const GetNextWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_get_next_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_next_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_next_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_next_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_next_write_id_pargs() throw(); + const GetNextWriteIdRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_next_write_id_result__isset { + _ThriftHiveMetastore_get_next_write_id_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_next_write_id_result__isset; + +class ThriftHiveMetastore_get_next_write_id_result { + public: + + ThriftHiveMetastore_get_next_write_id_result(const ThriftHiveMetastore_get_next_write_id_result&); + ThriftHiveMetastore_get_next_write_id_result& operator=(const ThriftHiveMetastore_get_next_write_id_result&); + ThriftHiveMetastore_get_next_write_id_result() { + } + + virtual ~ThriftHiveMetastore_get_next_write_id_result() throw(); + GetNextWriteIdResult success; + + _ThriftHiveMetastore_get_next_write_id_result__isset __isset; + + void __set_success(const GetNextWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_get_next_write_id_result & rhs) const + { + if (!(success 
== rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_next_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_next_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_next_write_id_presult__isset { + _ThriftHiveMetastore_get_next_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_next_write_id_presult__isset; + +class ThriftHiveMetastore_get_next_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_get_next_write_id_presult() throw(); + GetNextWriteIdResult* success; + + _ThriftHiveMetastore_get_next_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_args__isset { + _ThriftHiveMetastore_finalize_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_finalize_write_id_args__isset; + +class ThriftHiveMetastore_finalize_write_id_args { + public: + + ThriftHiveMetastore_finalize_write_id_args(const ThriftHiveMetastore_finalize_write_id_args&); + ThriftHiveMetastore_finalize_write_id_args& operator=(const ThriftHiveMetastore_finalize_write_id_args&); + ThriftHiveMetastore_finalize_write_id_args() { + } + + virtual ~ThriftHiveMetastore_finalize_write_id_args() throw(); + FinalizeWriteIdRequest req; + + _ThriftHiveMetastore_finalize_write_id_args__isset __isset; + + void __set_req(const FinalizeWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_finalize_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_finalize_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const 
ThriftHiveMetastore_finalize_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_finalize_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_finalize_write_id_pargs() throw(); + const FinalizeWriteIdRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_result__isset { + _ThriftHiveMetastore_finalize_write_id_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_finalize_write_id_result__isset; + +class ThriftHiveMetastore_finalize_write_id_result { + public: + + ThriftHiveMetastore_finalize_write_id_result(const ThriftHiveMetastore_finalize_write_id_result&); + ThriftHiveMetastore_finalize_write_id_result& operator=(const ThriftHiveMetastore_finalize_write_id_result&); + ThriftHiveMetastore_finalize_write_id_result() { + } + + virtual ~ThriftHiveMetastore_finalize_write_id_result() throw(); + FinalizeWriteIdResult success; + + _ThriftHiveMetastore_finalize_write_id_result__isset __isset; + + void __set_success(const FinalizeWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_finalize_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_finalize_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_finalize_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_finalize_write_id_presult__isset { + _ThriftHiveMetastore_finalize_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_finalize_write_id_presult__isset; + +class 
ThriftHiveMetastore_finalize_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_finalize_write_id_presult() throw(); + FinalizeWriteIdResult* success; + + _ThriftHiveMetastore_finalize_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_args__isset { + _ThriftHiveMetastore_heartbeat_write_id_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_heartbeat_write_id_args__isset; + +class ThriftHiveMetastore_heartbeat_write_id_args { + public: + + ThriftHiveMetastore_heartbeat_write_id_args(const ThriftHiveMetastore_heartbeat_write_id_args&); + ThriftHiveMetastore_heartbeat_write_id_args& operator=(const ThriftHiveMetastore_heartbeat_write_id_args&); + ThriftHiveMetastore_heartbeat_write_id_args() { + } + + virtual ~ThriftHiveMetastore_heartbeat_write_id_args() throw(); + HeartbeatWriteIdRequest req; + + _ThriftHiveMetastore_heartbeat_write_id_args__isset __isset; + + void __set_req(const HeartbeatWriteIdRequest& val); + + bool operator == (const ThriftHiveMetastore_heartbeat_write_id_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_heartbeat_write_id_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_heartbeat_write_id_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_heartbeat_write_id_pargs { + public: + + + virtual ~ThriftHiveMetastore_heartbeat_write_id_pargs() throw(); + const HeartbeatWriteIdRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_result__isset { + _ThriftHiveMetastore_heartbeat_write_id_result__isset() : success(false) {} + bool success :1; +} 
_ThriftHiveMetastore_heartbeat_write_id_result__isset; + +class ThriftHiveMetastore_heartbeat_write_id_result { + public: + + ThriftHiveMetastore_heartbeat_write_id_result(const ThriftHiveMetastore_heartbeat_write_id_result&); + ThriftHiveMetastore_heartbeat_write_id_result& operator=(const ThriftHiveMetastore_heartbeat_write_id_result&); + ThriftHiveMetastore_heartbeat_write_id_result() { + } + + virtual ~ThriftHiveMetastore_heartbeat_write_id_result() throw(); + HeartbeatWriteIdResult success; + + _ThriftHiveMetastore_heartbeat_write_id_result__isset __isset; + + void __set_success(const HeartbeatWriteIdResult& val); + + bool operator == (const ThriftHiveMetastore_heartbeat_write_id_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_heartbeat_write_id_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_heartbeat_write_id_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_heartbeat_write_id_presult__isset { + _ThriftHiveMetastore_heartbeat_write_id_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_heartbeat_write_id_presult__isset; + +class ThriftHiveMetastore_heartbeat_write_id_presult { + public: + + + virtual ~ThriftHiveMetastore_heartbeat_write_id_presult() throw(); + HeartbeatWriteIdResult* success; + + _ThriftHiveMetastore_heartbeat_write_id_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_get_valid_write_ids_args__isset { + _ThriftHiveMetastore_get_valid_write_ids_args__isset() : req(false) {} + bool req :1; +} _ThriftHiveMetastore_get_valid_write_ids_args__isset; + +class ThriftHiveMetastore_get_valid_write_ids_args { + public: + + 
ThriftHiveMetastore_get_valid_write_ids_args(const ThriftHiveMetastore_get_valid_write_ids_args&); + ThriftHiveMetastore_get_valid_write_ids_args& operator=(const ThriftHiveMetastore_get_valid_write_ids_args&); + ThriftHiveMetastore_get_valid_write_ids_args() { + } + + virtual ~ThriftHiveMetastore_get_valid_write_ids_args() throw(); + GetValidWriteIdsRequest req; + + _ThriftHiveMetastore_get_valid_write_ids_args__isset __isset; + + void __set_req(const GetValidWriteIdsRequest& val); + + bool operator == (const ThriftHiveMetastore_get_valid_write_ids_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_valid_write_ids_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_valid_write_ids_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_valid_write_ids_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_valid_write_ids_pargs() throw(); + const GetValidWriteIdsRequest* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_valid_write_ids_result__isset { + _ThriftHiveMetastore_get_valid_write_ids_result__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_valid_write_ids_result__isset; + +class ThriftHiveMetastore_get_valid_write_ids_result { + public: + + ThriftHiveMetastore_get_valid_write_ids_result(const ThriftHiveMetastore_get_valid_write_ids_result&); + ThriftHiveMetastore_get_valid_write_ids_result& operator=(const ThriftHiveMetastore_get_valid_write_ids_result&); + ThriftHiveMetastore_get_valid_write_ids_result() { + } + + virtual ~ThriftHiveMetastore_get_valid_write_ids_result() throw(); + GetValidWriteIdsResult success; + + _ThriftHiveMetastore_get_valid_write_ids_result__isset __isset; + + void 
__set_success(const GetValidWriteIdsResult& val); + + bool operator == (const ThriftHiveMetastore_get_valid_write_ids_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_valid_write_ids_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_valid_write_ids_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_valid_write_ids_presult__isset { + _ThriftHiveMetastore_get_valid_write_ids_presult__isset() : success(false) {} + bool success :1; +} _ThriftHiveMetastore_get_valid_write_ids_presult__isset; + +class ThriftHiveMetastore_get_valid_write_ids_presult { + public: + + + virtual ~ThriftHiveMetastore_get_valid_write_ids_presult() throw(); + GetValidWriteIdsResult* success; + + _ThriftHiveMetastore_get_valid_write_ids_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public ::facebook::fb303::FacebookServiceClient { public: ThriftHiveMetastoreClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : @@ -19722,6 +20154,18 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req); void send_cache_file_metadata(const CacheFileMetadataRequest& req); void recv_cache_file_metadata(CacheFileMetadataResult& _return); + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req); + void send_get_next_write_id(const GetNextWriteIdRequest& req); + void recv_get_next_write_id(GetNextWriteIdResult& _return); + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req); + void 
send_finalize_write_id(const FinalizeWriteIdRequest& req); + void recv_finalize_write_id(FinalizeWriteIdResult& _return); + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); + void send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); + void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return); + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req); + void send_get_valid_write_ids(const GetValidWriteIdsRequest& req); + void recv_get_valid_write_ids(GetValidWriteIdsResult& _return); }; class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceProcessor { @@ -19882,6 +20326,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_put_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_clear_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_cache_file_metadata(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_next_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_finalize_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_heartbeat_write_id(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_valid_write_ids(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); public: ThriftHiveMetastoreProcessor(boost::shared_ptr iface) : 
::facebook::fb303::FacebookServiceProcessor(iface), @@ -20036,6 +20484,10 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["put_file_metadata"] = &ThriftHiveMetastoreProcessor::process_put_file_metadata; processMap_["clear_file_metadata"] = &ThriftHiveMetastoreProcessor::process_clear_file_metadata; processMap_["cache_file_metadata"] = &ThriftHiveMetastoreProcessor::process_cache_file_metadata; + processMap_["get_next_write_id"] = &ThriftHiveMetastoreProcessor::process_get_next_write_id; + processMap_["finalize_write_id"] = &ThriftHiveMetastoreProcessor::process_finalize_write_id; + processMap_["heartbeat_write_id"] = &ThriftHiveMetastoreProcessor::process_heartbeat_write_id; + processMap_["get_valid_write_ids"] = &ThriftHiveMetastoreProcessor::process_get_valid_write_ids; } virtual ~ThriftHiveMetastoreProcessor() {} @@ -21506,6 +21958,46 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->get_next_write_id(_return, req); + } + ifaces_[i]->get_next_write_id(_return, req); + return; + } + + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->finalize_write_id(_return, req); + } + ifaces_[i]->finalize_write_id(_return, req); + return; + } + + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->heartbeat_write_id(_return, req); + } + ifaces_[i]->heartbeat_write_id(_return, req); + return; + } + + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; 
i < (sz - 1); ++i) { + ifaces_[i]->get_valid_write_ids(_return, req); + } + ifaces_[i]->get_valid_write_ids(_return, req); + return; + } + }; // The 'concurrent' client is a thread safe client that correctly handles @@ -21972,6 +22464,18 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req); int32_t send_cache_file_metadata(const CacheFileMetadataRequest& req); void recv_cache_file_metadata(CacheFileMetadataResult& _return, const int32_t seqid); + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req); + int32_t send_get_next_write_id(const GetNextWriteIdRequest& req); + void recv_get_next_write_id(GetNextWriteIdResult& _return, const int32_t seqid); + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req); + int32_t send_finalize_write_id(const FinalizeWriteIdRequest& req); + void recv_finalize_write_id(FinalizeWriteIdResult& _return, const int32_t seqid); + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req); + int32_t send_heartbeat_write_id(const HeartbeatWriteIdRequest& req); + void recv_heartbeat_write_id(HeartbeatWriteIdResult& _return, const int32_t seqid); + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req); + int32_t send_get_valid_write_ids(const GetValidWriteIdsRequest& req); + void recv_get_valid_write_ids(GetValidWriteIdsResult& _return, const int32_t seqid); }; #ifdef _WIN32 diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index d66fdbec64bd..bb37e6976700 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -772,6 +772,26 @@ class 
ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("cache_file_metadata\n"); } + void get_next_write_id(GetNextWriteIdResult& _return, const GetNextWriteIdRequest& req) { + // Your implementation goes here + printf("get_next_write_id\n"); + } + + void finalize_write_id(FinalizeWriteIdResult& _return, const FinalizeWriteIdRequest& req) { + // Your implementation goes here + printf("finalize_write_id\n"); + } + + void heartbeat_write_id(HeartbeatWriteIdResult& _return, const HeartbeatWriteIdRequest& req) { + // Your implementation goes here + printf("heartbeat_write_id\n"); + } + + void get_valid_write_ids(GetValidWriteIdsResult& _return, const GetValidWriteIdsRequest& req) { + // Your implementation goes here + printf("get_valid_write_ids\n"); + } + }; int main(int argc, char **argv) { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp index 1cbd176597b4..ccc61cb42523 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp @@ -59,6 +59,8 @@ hive_metastoreConstants::hive_metastoreConstants() { TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"; + TABLE_IS_MM = "hivecommit"; + } }}} // namespace diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h index 3d068c3ec9e9..92a211647bc3 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_constants.h @@ -39,6 +39,7 @@ class hive_metastoreConstants { std::string TABLE_IS_TRANSACTIONAL; std::string TABLE_NO_AUTO_COMPACT; std::string TABLE_TRANSACTIONAL_PROPERTIES; + std::string TABLE_IS_MM; }; extern const hive_metastoreConstants g_hive_metastore_constants; diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp 
b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index f3aaf7e0eec2..380257c7320b 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -4471,6 +4471,16 @@ void Table::__set_temporary(const bool val) { __isset.temporary = true; } +void Table::__set_mmNextWriteId(const int64_t val) { + this->mmNextWriteId = val; +__isset.mmNextWriteId = true; +} + +void Table::__set_mmWatermarkWriteId(const int64_t val) { + this->mmWatermarkWriteId = val; +__isset.mmWatermarkWriteId = true; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -4631,6 +4641,22 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->mmNextWriteId); + this->__isset.mmNextWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 16: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->mmWatermarkWriteId); + this->__isset.mmWatermarkWriteId = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -4723,6 +4749,16 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeBool(this->temporary); xfer += oprot->writeFieldEnd(); } + if (this->__isset.mmNextWriteId) { + xfer += oprot->writeFieldBegin("mmNextWriteId", ::apache::thrift::protocol::T_I64, 15); + xfer += oprot->writeI64(this->mmNextWriteId); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.mmWatermarkWriteId) { + xfer += oprot->writeFieldBegin("mmWatermarkWriteId", ::apache::thrift::protocol::T_I64, 16); + xfer += oprot->writeI64(this->mmWatermarkWriteId); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ 
-4744,6 +4780,8 @@ void swap(Table &a, Table &b) { swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); swap(a.temporary, b.temporary); + swap(a.mmNextWriteId, b.mmNextWriteId); + swap(a.mmWatermarkWriteId, b.mmWatermarkWriteId); swap(a.__isset, b.__isset); } @@ -4762,6 +4800,8 @@ Table::Table(const Table& other221) { tableType = other221.tableType; privileges = other221.privileges; temporary = other221.temporary; + mmNextWriteId = other221.mmNextWriteId; + mmWatermarkWriteId = other221.mmWatermarkWriteId; __isset = other221.__isset; } Table& Table::operator=(const Table& other222) { @@ -4779,6 +4819,8 @@ Table& Table::operator=(const Table& other222) { tableType = other222.tableType; privileges = other222.privileges; temporary = other222.temporary; + mmNextWriteId = other222.mmNextWriteId; + mmWatermarkWriteId = other222.mmWatermarkWriteId; __isset = other222.__isset; return *this; } @@ -4799,6 +4841,8 @@ void Table::printTo(std::ostream& out) const { out << ", " << "tableType=" << to_string(tableType); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); + out << ", " << "mmNextWriteId="; (__isset.mmNextWriteId ? (out << to_string(mmNextWriteId)) : (out << "")); + out << ", " << "mmWatermarkWriteId="; (__isset.mmWatermarkWriteId ? 
(out << to_string(mmWatermarkWriteId)) : (out << "")); out << ")"; } @@ -17676,16 +17720,19 @@ void CacheFileMetadataRequest::printTo(std::ostream& out) const { } -GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() { +GetNextWriteIdRequest::~GetNextWriteIdRequest() throw() { } -void GetAllFunctionsResponse::__set_functions(const std::vector & val) { - this->functions = val; -__isset.functions = true; +void GetNextWriteIdRequest::__set_dbName(const std::string& val) { + this->dbName = val; } -uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { +void GetNextWriteIdRequest::__set_tblName(const std::string& val) { + this->tblName = val; +} + +uint32_t GetNextWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17697,6 +17744,8 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip using ::apache::thrift::protocol::TProtocolException; + bool isset_dbName = false; + bool isset_tblName = false; while (true) { @@ -17707,21 +17756,17 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - this->functions.clear(); - uint32_t _size739; - ::apache::thrift::protocol::TType _etype742; - xfer += iprot->readListBegin(_etype742, _size739); - this->functions.resize(_size739); - uint32_t _i743; - for (_i743 = 0; _i743 < _size739; ++_i743) - { - xfer += this->functions[_i743].read(iprot); - } - xfer += iprot->readListEnd(); - } - this->__isset.functions = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tblName); + isset_tblName = true; } else { xfer += iprot->skip(ftype); } 
@@ -17735,77 +17780,162 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip xfer += iprot->readStructEnd(); + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tblName) + throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } -uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t GetNextWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("GetAllFunctionsResponse"); + xfer += oprot->writeStructBegin("GetNextWriteIdRequest"); - if (this->__isset.functions) { - xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tblName, b.tblName); +} + +GetNextWriteIdRequest::GetNextWriteIdRequest(const GetNextWriteIdRequest& other739) { + dbName = other739.dbName; + tblName = other739.tblName; +} +GetNextWriteIdRequest& GetNextWriteIdRequest::operator=(const GetNextWriteIdRequest& other740) { + dbName = other740.dbName; + tblName = other740.tblName; + return *this; +} +void GetNextWriteIdRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetNextWriteIdRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tblName=" << to_string(tblName); + out << ")"; +} + + 
+GetNextWriteIdResult::~GetNextWriteIdResult() throw() { +} + + +void GetNextWriteIdResult::__set_writeId(const int64_t val) { + this->writeId = val; +} + +uint32_t GetNextWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_writeId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter744; - for (_iter744 = this->functions.begin(); _iter744 != this->functions.end(); ++_iter744) - { - xfer += (*_iter744).write(oprot); - } - xfer += oprot->writeListEnd(); + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; } - xfer += oprot->writeFieldEnd(); + xfer += iprot->readFieldEnd(); } + + xfer += iprot->readStructEnd(); + + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetNextWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetNextWriteIdResult"); + + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(GetAllFunctionsResponse &a, 
GetAllFunctionsResponse &b) { +void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b) { using ::std::swap; - swap(a.functions, b.functions); - swap(a.__isset, b.__isset); + swap(a.writeId, b.writeId); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other745) { - functions = other745.functions; - __isset = other745.__isset; +GetNextWriteIdResult::GetNextWriteIdResult(const GetNextWriteIdResult& other741) { + writeId = other741.writeId; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other746) { - functions = other746.functions; - __isset = other746.__isset; +GetNextWriteIdResult& GetNextWriteIdResult::operator=(const GetNextWriteIdResult& other742) { + writeId = other742.writeId; return *this; } -void GetAllFunctionsResponse::printTo(std::ostream& out) const { +void GetNextWriteIdResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "GetAllFunctionsResponse("; - out << "functions="; (__isset.functions ? 
(out << to_string(functions)) : (out << "")); + out << "GetNextWriteIdResult("; + out << "writeId=" << to_string(writeId); out << ")"; } -TableMeta::~TableMeta() throw() { +FinalizeWriteIdRequest::~FinalizeWriteIdRequest() throw() { } -void TableMeta::__set_dbName(const std::string& val) { +void FinalizeWriteIdRequest::__set_dbName(const std::string& val) { this->dbName = val; } -void TableMeta::__set_tableName(const std::string& val) { - this->tableName = val; +void FinalizeWriteIdRequest::__set_tblName(const std::string& val) { + this->tblName = val; } -void TableMeta::__set_tableType(const std::string& val) { - this->tableType = val; +void FinalizeWriteIdRequest::__set_writeId(const int64_t val) { + this->writeId = val; } -void TableMeta::__set_comments(const std::string& val) { - this->comments = val; -__isset.comments = true; +void FinalizeWriteIdRequest::__set_commit(const bool val) { + this->commit = val; } -uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t FinalizeWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17818,8 +17948,9 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { using ::apache::thrift::protocol::TProtocolException; bool isset_dbName = false; - bool isset_tableName = false; - bool isset_tableType = false; + bool isset_tblName = false; + bool isset_writeId = false; + bool isset_commit = false; while (true) { @@ -17839,24 +17970,24 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tableName); - isset_tableName = true; + xfer += iprot->readString(this->tblName); + isset_tblName = true; } else { xfer += iprot->skip(ftype); } break; case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tableType); - isset_tableType 
= true; + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; } else { xfer += iprot->skip(ftype); } break; case 4: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->comments); - this->__isset.comments = true; + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->commit); + isset_commit = true; } else { xfer += iprot->skip(ftype); } @@ -17872,84 +18003,78 @@ uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { if (!isset_dbName) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_tableName) + if (!isset_tblName) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_tableType) + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_commit) throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } -uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t FinalizeWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("TableMeta"); + xfer += oprot->writeStructBegin("FinalizeWriteIdRequest"); xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); xfer += oprot->writeString(this->dbName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); xfer += oprot->writeFieldEnd(); - xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->tableType); + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3); 
+ xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("commit", ::apache::thrift::protocol::T_BOOL, 4); + xfer += oprot->writeBool(this->commit); xfer += oprot->writeFieldEnd(); - if (this->__isset.comments) { - xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->comments); - xfer += oprot->writeFieldEnd(); - } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(TableMeta &a, TableMeta &b) { +void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b) { using ::std::swap; swap(a.dbName, b.dbName); - swap(a.tableName, b.tableName); - swap(a.tableType, b.tableType); - swap(a.comments, b.comments); - swap(a.__isset, b.__isset); + swap(a.tblName, b.tblName); + swap(a.writeId, b.writeId); + swap(a.commit, b.commit); } -TableMeta::TableMeta(const TableMeta& other747) { - dbName = other747.dbName; - tableName = other747.tableName; - tableType = other747.tableType; - comments = other747.comments; - __isset = other747.__isset; +FinalizeWriteIdRequest::FinalizeWriteIdRequest(const FinalizeWriteIdRequest& other743) { + dbName = other743.dbName; + tblName = other743.tblName; + writeId = other743.writeId; + commit = other743.commit; } -TableMeta& TableMeta::operator=(const TableMeta& other748) { - dbName = other748.dbName; - tableName = other748.tableName; - tableType = other748.tableType; - comments = other748.comments; - __isset = other748.__isset; +FinalizeWriteIdRequest& FinalizeWriteIdRequest::operator=(const FinalizeWriteIdRequest& other744) { + dbName = other744.dbName; + tblName = other744.tblName; + writeId = other744.writeId; + commit = other744.commit; return *this; } -void TableMeta::printTo(std::ostream& out) const { +void FinalizeWriteIdRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "TableMeta("; + out << "FinalizeWriteIdRequest("; out << "dbName=" << 
to_string(dbName); - out << ", " << "tableName=" << to_string(tableName); - out << ", " << "tableType=" << to_string(tableType); - out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "")); + out << ", " << "tblName=" << to_string(tblName); + out << ", " << "writeId=" << to_string(writeId); + out << ", " << "commit=" << to_string(commit); out << ")"; } -MetaException::~MetaException() throw() { +FinalizeWriteIdResult::~FinalizeWriteIdResult() throw() { } -void MetaException::__set_message(const std::string& val) { - this->message = val; -} - -uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t FinalizeWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -17968,20 +18093,7 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_STOP) { break; } - switch (fid) - { - case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->message); - this->__isset.message = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } + xfer += iprot->skip(ftype); xfer += iprot->readFieldEnd(); } @@ -17990,63 +18102,886 @@ uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { return xfer; } -uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t FinalizeWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("MetaException"); - - xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->message); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("FinalizeWriteIdResult"); xfer += 
oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; } -void swap(MetaException &a, MetaException &b) { +void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b) { using ::std::swap; - swap(a.message, b.message); - swap(a.__isset, b.__isset); + (void) a; + (void) b; } -MetaException::MetaException(const MetaException& other749) : TException() { - message = other749.message; - __isset = other749.__isset; +FinalizeWriteIdResult::FinalizeWriteIdResult(const FinalizeWriteIdResult& other745) { + (void) other745; } -MetaException& MetaException::operator=(const MetaException& other750) { - message = other750.message; - __isset = other750.__isset; +FinalizeWriteIdResult& FinalizeWriteIdResult::operator=(const FinalizeWriteIdResult& other746) { + (void) other746; return *this; } -void MetaException::printTo(std::ostream& out) const { +void FinalizeWriteIdResult::printTo(std::ostream& out) const { using ::apache::thrift::to_string; - out << "MetaException("; - out << "message=" << to_string(message); + out << "FinalizeWriteIdResult("; out << ")"; } -const char* MetaException::what() const throw() { - try { - std::stringstream ss; - ss << "TException - service has thrown: " << *this; - this->thriftTExceptionMessageHolder_ = ss.str(); - return this->thriftTExceptionMessageHolder_.c_str(); - } catch (const std::exception&) { - return "TException - service has thrown: MetaException"; - } + +HeartbeatWriteIdRequest::~HeartbeatWriteIdRequest() throw() { } -UnknownTableException::~UnknownTableException() throw() { +void HeartbeatWriteIdRequest::__set_dbName(const std::string& val) { + this->dbName = val; } +void HeartbeatWriteIdRequest::__set_tblName(const std::string& val) { + this->tblName = val; +} -void UnknownTableException::__set_message(const std::string& val) { - this->message = val; +void HeartbeatWriteIdRequest::__set_writeId(const int64_t val) { + this->writeId = val; } -uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* 
iprot) { +uint32_t HeartbeatWriteIdRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tblName = false; + bool isset_writeId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tblName); + isset_tblName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->writeId); + isset_writeId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tblName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_writeId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t HeartbeatWriteIdRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("HeartbeatWriteIdRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += 
oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("writeId", ::apache::thrift::protocol::T_I64, 3); + xfer += oprot->writeI64(this->writeId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tblName, b.tblName); + swap(a.writeId, b.writeId); +} + +HeartbeatWriteIdRequest::HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest& other747) { + dbName = other747.dbName; + tblName = other747.tblName; + writeId = other747.writeId; +} +HeartbeatWriteIdRequest& HeartbeatWriteIdRequest::operator=(const HeartbeatWriteIdRequest& other748) { + dbName = other748.dbName; + tblName = other748.tblName; + writeId = other748.writeId; + return *this; +} +void HeartbeatWriteIdRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "HeartbeatWriteIdRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tblName=" << to_string(tblName); + out << ", " << "writeId=" << to_string(writeId); + out << ")"; +} + + +HeartbeatWriteIdResult::~HeartbeatWriteIdResult() throw() { +} + + +uint32_t HeartbeatWriteIdResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t 
HeartbeatWriteIdResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("HeartbeatWriteIdResult"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b) { + using ::std::swap; + (void) a; + (void) b; +} + +HeartbeatWriteIdResult::HeartbeatWriteIdResult(const HeartbeatWriteIdResult& other749) { + (void) other749; +} +HeartbeatWriteIdResult& HeartbeatWriteIdResult::operator=(const HeartbeatWriteIdResult& other750) { + (void) other750; + return *this; +} +void HeartbeatWriteIdResult::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "HeartbeatWriteIdResult("; + out << ")"; +} + + +GetValidWriteIdsRequest::~GetValidWriteIdsRequest() throw() { +} + + +void GetValidWriteIdsRequest::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void GetValidWriteIdsRequest::__set_tblName(const std::string& val) { + this->tblName = val; +} + +uint32_t GetValidWriteIdsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tblName = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tblName); + 
isset_tblName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tblName) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetValidWriteIdsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetValidWriteIdsRequest"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tblName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tblName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tblName, b.tblName); +} + +GetValidWriteIdsRequest::GetValidWriteIdsRequest(const GetValidWriteIdsRequest& other751) { + dbName = other751.dbName; + tblName = other751.tblName; +} +GetValidWriteIdsRequest& GetValidWriteIdsRequest::operator=(const GetValidWriteIdsRequest& other752) { + dbName = other752.dbName; + tblName = other752.tblName; + return *this; +} +void GetValidWriteIdsRequest::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetValidWriteIdsRequest("; + out << "dbName=" << to_string(dbName); + out << ", " << "tblName=" << to_string(tblName); + out << ")"; +} + + +GetValidWriteIdsResult::~GetValidWriteIdsResult() throw() { +} + + +void GetValidWriteIdsResult::__set_lowWatermarkId(const int64_t val) { + this->lowWatermarkId = val; +} + +void 
GetValidWriteIdsResult::__set_highWatermarkId(const int64_t val) { + this->highWatermarkId = val; +} + +void GetValidWriteIdsResult::__set_areIdsValid(const bool val) { + this->areIdsValid = val; +__isset.areIdsValid = true; +} + +void GetValidWriteIdsResult::__set_ids(const std::vector & val) { + this->ids = val; +__isset.ids = true; +} + +uint32_t GetValidWriteIdsResult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_lowWatermarkId = false; + bool isset_highWatermarkId = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->lowWatermarkId); + isset_lowWatermarkId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->highWatermarkId); + isset_highWatermarkId = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->areIdsValid); + this->__isset.areIdsValid = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->ids.clear(); + uint32_t _size753; + ::apache::thrift::protocol::TType _etype756; + xfer += iprot->readListBegin(_etype756, _size753); + this->ids.resize(_size753); + uint32_t _i757; + for (_i757 = 0; _i757 < _size753; ++_i757) + { + xfer += iprot->readI64(this->ids[_i757]); + } + xfer += iprot->readListEnd(); + } + this->__isset.ids = true; + } else { + xfer += iprot->skip(ftype); + } + break; + 
default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_lowWatermarkId) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_highWatermarkId) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t GetValidWriteIdsResult::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetValidWriteIdsResult"); + + xfer += oprot->writeFieldBegin("lowWatermarkId", ::apache::thrift::protocol::T_I64, 1); + xfer += oprot->writeI64(this->lowWatermarkId); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("highWatermarkId", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->highWatermarkId); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.areIdsValid) { + xfer += oprot->writeFieldBegin("areIdsValid", ::apache::thrift::protocol::T_BOOL, 3); + xfer += oprot->writeBool(this->areIdsValid); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.ids) { + xfer += oprot->writeFieldBegin("ids", ::apache::thrift::protocol::T_LIST, 4); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->ids.size())); + std::vector ::const_iterator _iter758; + for (_iter758 = this->ids.begin(); _iter758 != this->ids.end(); ++_iter758) + { + xfer += oprot->writeI64((*_iter758)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b) { + using ::std::swap; + swap(a.lowWatermarkId, b.lowWatermarkId); + swap(a.highWatermarkId, b.highWatermarkId); + swap(a.areIdsValid, b.areIdsValid); + swap(a.ids, b.ids); + swap(a.__isset, b.__isset); +} + 
+GetValidWriteIdsResult::GetValidWriteIdsResult(const GetValidWriteIdsResult& other759) { + lowWatermarkId = other759.lowWatermarkId; + highWatermarkId = other759.highWatermarkId; + areIdsValid = other759.areIdsValid; + ids = other759.ids; + __isset = other759.__isset; +} +GetValidWriteIdsResult& GetValidWriteIdsResult::operator=(const GetValidWriteIdsResult& other760) { + lowWatermarkId = other760.lowWatermarkId; + highWatermarkId = other760.highWatermarkId; + areIdsValid = other760.areIdsValid; + ids = other760.ids; + __isset = other760.__isset; + return *this; +} +void GetValidWriteIdsResult::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetValidWriteIdsResult("; + out << "lowWatermarkId=" << to_string(lowWatermarkId); + out << ", " << "highWatermarkId=" << to_string(highWatermarkId); + out << ", " << "areIdsValid="; (__isset.areIdsValid ? (out << to_string(areIdsValid)) : (out << "")); + out << ", " << "ids="; (__isset.ids ? (out << to_string(ids)) : (out << "")); + out << ")"; +} + + +GetAllFunctionsResponse::~GetAllFunctionsResponse() throw() { +} + + +void GetAllFunctionsResponse::__set_functions(const std::vector & val) { + this->functions = val; +__isset.functions = true; +} + +uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->functions.clear(); + uint32_t _size761; + ::apache::thrift::protocol::TType _etype764; + xfer += iprot->readListBegin(_etype764, _size761); + 
this->functions.resize(_size761); + uint32_t _i765; + for (_i765 = 0; _i765 < _size761; ++_i765) + { + xfer += this->functions[_i765].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.functions = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("GetAllFunctionsResponse"); + + if (this->__isset.functions) { + xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); + std::vector ::const_iterator _iter766; + for (_iter766 = this->functions.begin(); _iter766 != this->functions.end(); ++_iter766) + { + xfer += (*_iter766).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { + using ::std::swap; + swap(a.functions, b.functions); + swap(a.__isset, b.__isset); +} + +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other767) { + functions = other767.functions; + __isset = other767.__isset; +} +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other768) { + functions = other768.functions; + __isset = other768.__isset; + return *this; +} +void GetAllFunctionsResponse::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "GetAllFunctionsResponse("; + out << "functions="; (__isset.functions ? 
(out << to_string(functions)) : (out << "")); + out << ")"; +} + + +TableMeta::~TableMeta() throw() { +} + + +void TableMeta::__set_dbName(const std::string& val) { + this->dbName = val; +} + +void TableMeta::__set_tableName(const std::string& val) { + this->tableName = val; +} + +void TableMeta::__set_tableType(const std::string& val) { + this->tableType = val; +} + +void TableMeta::__set_comments(const std::string& val) { + this->comments = val; +__isset.comments = true; +} + +uint32_t TableMeta::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_dbName = false; + bool isset_tableName = false; + bool isset_tableType = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->dbName); + isset_dbName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + isset_tableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableType); + isset_tableType = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->comments); + this->__isset.comments = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_dbName) + 
throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableName) + throw TProtocolException(TProtocolException::INVALID_DATA); + if (!isset_tableType) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TableMeta::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TableMeta"); + + xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->dbName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tableType", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->tableType); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.comments) { + xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->comments); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TableMeta &a, TableMeta &b) { + using ::std::swap; + swap(a.dbName, b.dbName); + swap(a.tableName, b.tableName); + swap(a.tableType, b.tableType); + swap(a.comments, b.comments); + swap(a.__isset, b.__isset); +} + +TableMeta::TableMeta(const TableMeta& other769) { + dbName = other769.dbName; + tableName = other769.tableName; + tableType = other769.tableType; + comments = other769.comments; + __isset = other769.__isset; +} +TableMeta& TableMeta::operator=(const TableMeta& other770) { + dbName = other770.dbName; + tableName = other770.tableName; + tableType = other770.tableType; + comments = other770.comments; + __isset = other770.__isset; + return *this; +} +void TableMeta::printTo(std::ostream& out) const 
{ + using ::apache::thrift::to_string; + out << "TableMeta("; + out << "dbName=" << to_string(dbName); + out << ", " << "tableName=" << to_string(tableName); + out << ", " << "tableType=" << to_string(tableType); + out << ", " << "comments="; (__isset.comments ? (out << to_string(comments)) : (out << "")); + out << ")"; +} + + +MetaException::~MetaException() throw() { +} + + +void MetaException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t MetaException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t MetaException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("MetaException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(MetaException &a, MetaException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.__isset, b.__isset); +} + +MetaException::MetaException(const MetaException& 
other771) : TException() { + message = other771.message; + __isset = other771.__isset; +} +MetaException& MetaException::operator=(const MetaException& other772) { + message = other772.message; + __isset = other772.__isset; + return *this; +} +void MetaException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "MetaException("; + out << "message=" << to_string(message); + out << ")"; +} + +const char* MetaException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: MetaException"; + } +} + + +UnknownTableException::~UnknownTableException() throw() { +} + + +void UnknownTableException::__set_message(const std::string& val) { + this->message = val; +} + +uint32_t UnknownTableException::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -18107,13 +19042,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other751) : TException() { - message = other751.message; - __isset = other751.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other773) : TException() { + message = other773.message; + __isset = other773.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other752) { - message = other752.message; - __isset = other752.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other774) { + message = other774.message; + __isset = other774.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -18204,13 +19139,13 @@ void 
swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other753) : TException() { - message = other753.message; - __isset = other753.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other775) : TException() { + message = other775.message; + __isset = other775.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other754) { - message = other754.message; - __isset = other754.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other776) { + message = other776.message; + __isset = other776.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -18301,13 +19236,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other755) : TException() { - message = other755.message; - __isset = other755.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other777) : TException() { + message = other777.message; + __isset = other777.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other756) { - message = other756.message; - __isset = other756.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other778) { + message = other778.message; + __isset = other778.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -18398,13 +19333,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other757) : TException() { - message = other757.message; - __isset = other757.__isset; +InvalidPartitionException::InvalidPartitionException(const 
InvalidPartitionException& other779) : TException() { + message = other779.message; + __isset = other779.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other758) { - message = other758.message; - __isset = other758.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other780) { + message = other780.message; + __isset = other780.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -18495,13 +19430,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other759) : TException() { - message = other759.message; - __isset = other759.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other781) : TException() { + message = other781.message; + __isset = other781.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other760) { - message = other760.message; - __isset = other760.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other782) { + message = other782.message; + __isset = other782.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -18592,13 +19527,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other761) : TException() { - message = other761.message; - __isset = other761.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other783) : TException() { + message = other783.message; + __isset = other783.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other762) { - 
message = other762.message; - __isset = other762.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other784) { + message = other784.message; + __isset = other784.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -18689,13 +19624,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other763) : TException() { - message = other763.message; - __isset = other763.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other785) : TException() { + message = other785.message; + __isset = other785.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other764) { - message = other764.message; - __isset = other764.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other786) { + message = other786.message; + __isset = other786.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -18786,13 +19721,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other765) : TException() { - message = other765.message; - __isset = other765.__isset; +IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other787) : TException() { + message = other787.message; + __isset = other787.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other766) { - message = other766.message; - __isset = other766.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other788) { + message = other788.message; + __isset = other788.__isset; return *this; 
} void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -18883,13 +19818,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other767) : TException() { - message = other767.message; - __isset = other767.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other789) : TException() { + message = other789.message; + __isset = other789.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other768) { - message = other768.message; - __isset = other768.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other790) { + message = other790.message; + __isset = other790.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -18980,13 +19915,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other769) : TException() { - message = other769.message; - __isset = other769.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other791) : TException() { + message = other791.message; + __isset = other791.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other770) { - message = other770.message; - __isset = other770.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other792) { + message = other792.message; + __isset = other792.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -19077,13 +20012,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, 
b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other771) : TException() { - message = other771.message; - __isset = other771.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other793) : TException() { + message = other793.message; + __isset = other793.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other772) { - message = other772.message; - __isset = other772.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other794) { + message = other794.message; + __isset = other794.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -19174,13 +20109,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other773) : TException() { - message = other773.message; - __isset = other773.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other795) : TException() { + message = other795.message; + __isset = other795.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other774) { - message = other774.message; - __isset = other774.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other796) { + message = other796.message; + __isset = other796.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -19271,13 +20206,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other775) : TException() { - message = other775.message; - __isset = other775.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other797) : TException() { + message = other797.message; + __isset = other797.__isset; } -TxnAbortedException& 
TxnAbortedException::operator=(const TxnAbortedException& other776) { - message = other776.message; - __isset = other776.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other798) { + message = other798.message; + __isset = other798.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -19368,13 +20303,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other777) : TException() { - message = other777.message; - __isset = other777.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other799) : TException() { + message = other799.message; + __isset = other799.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other778) { - message = other778.message; - __isset = other778.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other800) { + message = other800.message; + __isset = other800.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -19465,13 +20400,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other779) : TException() { - message = other779.message; - __isset = other779.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other801) : TException() { + message = other801.message; + __isset = other801.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other780) { - message = other780.message; - __isset = other780.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other802) { + message = other802.message; + __isset = other802.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git 
a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 1252a2dd21a8..87d9664c1f45 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -384,6 +384,22 @@ class CacheFileMetadataResult; class CacheFileMetadataRequest; +class GetNextWriteIdRequest; + +class GetNextWriteIdResult; + +class FinalizeWriteIdRequest; + +class FinalizeWriteIdResult; + +class HeartbeatWriteIdRequest; + +class HeartbeatWriteIdResult; + +class GetValidWriteIdsRequest; + +class GetValidWriteIdsResult; + class GetAllFunctionsResponse; class TableMeta; @@ -2042,7 +2058,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true) {} + _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), mmNextWriteId(false), mmWatermarkWriteId(false) {} bool tableName :1; bool dbName :1; bool owner :1; @@ -2057,6 +2073,8 @@ typedef struct _Table__isset { bool tableType :1; bool privileges :1; bool temporary :1; + bool mmNextWriteId :1; + bool mmWatermarkWriteId :1; } _Table__isset; class Table { @@ -2064,7 +2082,7 @@ class Table { Table(const Table&); Table& operator=(const Table&); - Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false) { + Table() : tableName(), dbName(), owner(), createTime(0), 
lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), mmNextWriteId(0), mmWatermarkWriteId(0) { } virtual ~Table() throw(); @@ -2082,6 +2100,8 @@ class Table { std::string tableType; PrincipalPrivilegeSet privileges; bool temporary; + int64_t mmNextWriteId; + int64_t mmWatermarkWriteId; _Table__isset __isset; @@ -2113,6 +2133,10 @@ class Table { void __set_temporary(const bool val); + void __set_mmNextWriteId(const int64_t val); + + void __set_mmWatermarkWriteId(const int64_t val); + bool operator == (const Table & rhs) const { if (!(tableName == rhs.tableName)) @@ -2147,6 +2171,14 @@ class Table { return false; else if (__isset.temporary && !(temporary == rhs.temporary)) return false; + if (__isset.mmNextWriteId != rhs.__isset.mmNextWriteId) + return false; + else if (__isset.mmNextWriteId && !(mmNextWriteId == rhs.mmNextWriteId)) + return false; + if (__isset.mmWatermarkWriteId != rhs.__isset.mmWatermarkWriteId) + return false; + else if (__isset.mmWatermarkWriteId && !(mmWatermarkWriteId == rhs.mmWatermarkWriteId)) + return false; return true; } bool operator != (const Table &rhs) const { @@ -7181,6 +7213,377 @@ inline std::ostream& operator<<(std::ostream& out, const CacheFileMetadataReques return out; } + +class GetNextWriteIdRequest { + public: + + GetNextWriteIdRequest(const GetNextWriteIdRequest&); + GetNextWriteIdRequest& operator=(const GetNextWriteIdRequest&); + GetNextWriteIdRequest() : dbName(), tblName() { + } + + virtual ~GetNextWriteIdRequest() throw(); + std::string dbName; + std::string tblName; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + bool operator == (const GetNextWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + return true; + } + bool operator != (const GetNextWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const 
GetNextWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetNextWriteIdRequest &a, GetNextWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class GetNextWriteIdResult { + public: + + GetNextWriteIdResult(const GetNextWriteIdResult&); + GetNextWriteIdResult& operator=(const GetNextWriteIdResult&); + GetNextWriteIdResult() : writeId(0) { + } + + virtual ~GetNextWriteIdResult() throw(); + int64_t writeId; + + void __set_writeId(const int64_t val); + + bool operator == (const GetNextWriteIdResult & rhs) const + { + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const GetNextWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetNextWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetNextWriteIdResult &a, GetNextWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const GetNextWriteIdResult& obj) +{ + obj.printTo(out); + return out; +} + + +class FinalizeWriteIdRequest { + public: + + FinalizeWriteIdRequest(const FinalizeWriteIdRequest&); + FinalizeWriteIdRequest& operator=(const FinalizeWriteIdRequest&); + FinalizeWriteIdRequest() : dbName(), tblName(), writeId(0), commit(0) { + } + + virtual ~FinalizeWriteIdRequest() throw(); + std::string dbName; + std::string tblName; + int64_t writeId; + bool commit; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + void __set_writeId(const int64_t val); + + void __set_commit(const bool val); + + bool operator == (const 
FinalizeWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + if (!(writeId == rhs.writeId)) + return false; + if (!(commit == rhs.commit)) + return false; + return true; + } + bool operator != (const FinalizeWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const FinalizeWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(FinalizeWriteIdRequest &a, FinalizeWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class FinalizeWriteIdResult { + public: + + FinalizeWriteIdResult(const FinalizeWriteIdResult&); + FinalizeWriteIdResult& operator=(const FinalizeWriteIdResult&); + FinalizeWriteIdResult() { + } + + virtual ~FinalizeWriteIdResult() throw(); + + bool operator == (const FinalizeWriteIdResult & /* rhs */) const + { + return true; + } + bool operator != (const FinalizeWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const FinalizeWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(FinalizeWriteIdResult &a, FinalizeWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const FinalizeWriteIdResult& obj) +{ + obj.printTo(out); + return out; +} + + +class HeartbeatWriteIdRequest { + public: + + HeartbeatWriteIdRequest(const HeartbeatWriteIdRequest&); + HeartbeatWriteIdRequest& operator=(const HeartbeatWriteIdRequest&); + HeartbeatWriteIdRequest() : dbName(), tblName(), writeId(0) { + } + + virtual ~HeartbeatWriteIdRequest() throw(); + std::string dbName; + std::string 
tblName; + int64_t writeId; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + void __set_writeId(const int64_t val); + + bool operator == (const HeartbeatWriteIdRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + if (!(writeId == rhs.writeId)) + return false; + return true; + } + bool operator != (const HeartbeatWriteIdRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HeartbeatWriteIdRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(HeartbeatWriteIdRequest &a, HeartbeatWriteIdRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdRequest& obj) +{ + obj.printTo(out); + return out; +} + + +class HeartbeatWriteIdResult { + public: + + HeartbeatWriteIdResult(const HeartbeatWriteIdResult&); + HeartbeatWriteIdResult& operator=(const HeartbeatWriteIdResult&); + HeartbeatWriteIdResult() { + } + + virtual ~HeartbeatWriteIdResult() throw(); + + bool operator == (const HeartbeatWriteIdResult & /* rhs */) const + { + return true; + } + bool operator != (const HeartbeatWriteIdResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HeartbeatWriteIdResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(HeartbeatWriteIdResult &a, HeartbeatWriteIdResult &b); + +inline std::ostream& operator<<(std::ostream& out, const HeartbeatWriteIdResult& obj) +{ + obj.printTo(out); + return out; +} + + +class GetValidWriteIdsRequest { + public: + + GetValidWriteIdsRequest(const GetValidWriteIdsRequest&); + GetValidWriteIdsRequest& operator=(const 
GetValidWriteIdsRequest&); + GetValidWriteIdsRequest() : dbName(), tblName() { + } + + virtual ~GetValidWriteIdsRequest() throw(); + std::string dbName; + std::string tblName; + + void __set_dbName(const std::string& val); + + void __set_tblName(const std::string& val); + + bool operator == (const GetValidWriteIdsRequest & rhs) const + { + if (!(dbName == rhs.dbName)) + return false; + if (!(tblName == rhs.tblName)) + return false; + return true; + } + bool operator != (const GetValidWriteIdsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetValidWriteIdsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetValidWriteIdsRequest &a, GetValidWriteIdsRequest &b); + +inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsRequest& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _GetValidWriteIdsResult__isset { + _GetValidWriteIdsResult__isset() : areIdsValid(false), ids(false) {} + bool areIdsValid :1; + bool ids :1; +} _GetValidWriteIdsResult__isset; + +class GetValidWriteIdsResult { + public: + + GetValidWriteIdsResult(const GetValidWriteIdsResult&); + GetValidWriteIdsResult& operator=(const GetValidWriteIdsResult&); + GetValidWriteIdsResult() : lowWatermarkId(0), highWatermarkId(0), areIdsValid(0) { + } + + virtual ~GetValidWriteIdsResult() throw(); + int64_t lowWatermarkId; + int64_t highWatermarkId; + bool areIdsValid; + std::vector ids; + + _GetValidWriteIdsResult__isset __isset; + + void __set_lowWatermarkId(const int64_t val); + + void __set_highWatermarkId(const int64_t val); + + void __set_areIdsValid(const bool val); + + void __set_ids(const std::vector & val); + + bool operator == (const GetValidWriteIdsResult & rhs) const + { + if (!(lowWatermarkId == rhs.lowWatermarkId)) + return false; + if (!(highWatermarkId == 
rhs.highWatermarkId)) + return false; + if (__isset.areIdsValid != rhs.__isset.areIdsValid) + return false; + else if (__isset.areIdsValid && !(areIdsValid == rhs.areIdsValid)) + return false; + if (__isset.ids != rhs.__isset.ids) + return false; + else if (__isset.ids && !(ids == rhs.ids)) + return false; + return true; + } + bool operator != (const GetValidWriteIdsResult &rhs) const { + return !(*this == rhs); + } + + bool operator < (const GetValidWriteIdsResult & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(GetValidWriteIdsResult &a, GetValidWriteIdsResult &b); + +inline std::ostream& operator<<(std::ostream& out, const GetValidWriteIdsResult& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _GetAllFunctionsResponse__isset { _GetAllFunctionsResponse__isset() : functions(false) {} bool functions :1; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java new file mode 100644 index 000000000000..f47460228128 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdRequest.java @@ -0,0 +1,684 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import 
org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class FinalizeWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3); + private static final org.apache.thrift.protocol.TField COMMIT_FIELD_DESC = new org.apache.thrift.protocol.TField("commit", org.apache.thrift.protocol.TType.BOOL, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new FinalizeWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new FinalizeWriteIdRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + private long writeId; // 
required + private boolean commit; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"), + WRITE_ID((short)3, "writeId"), + COMMIT((short)4, "commit"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // WRITE_ID + return WRITE_ID; + case 4: // COMMIT + return COMMIT; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private static final int __COMMIT_ISSET_ID = 1; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.COMMIT, new org.apache.thrift.meta_data.FieldMetaData("commit", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdRequest.class, metaDataMap); + } + + public FinalizeWriteIdRequest() { + } + + public FinalizeWriteIdRequest( + String dbName, + String 
tblName, + long writeId, + boolean commit) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + this.writeId = writeId; + setWriteIdIsSet(true); + this.commit = commit; + setCommitIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public FinalizeWriteIdRequest(FinalizeWriteIdRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + this.writeId = other.writeId; + this.commit = other.commit; + } + + public FinalizeWriteIdRequest deepCopy() { + return new FinalizeWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + setWriteIdIsSet(false); + this.writeId = 0; + setCommitIsSet(false); + this.commit = false; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, 
__WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public boolean isCommit() { + return this.commit; + } + + public void setCommit(boolean commit) { + this.commit = commit; + setCommitIsSet(true); + } + + public void unsetCommit() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __COMMIT_ISSET_ID); + } + + /** Returns true if field commit is set (has been assigned a value) and false otherwise */ + public boolean isSetCommit() { + return EncodingUtils.testBit(__isset_bitfield, __COMMIT_ISSET_ID); + } + + public void setCommitIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __COMMIT_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + case COMMIT: + if (value == null) { + unsetCommit(); + } else { + setCommit((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + case WRITE_ID: + return getWriteId(); + + case COMMIT: + return isCommit(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new 
IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + case WRITE_ID: + return isSetWriteId(); + case COMMIT: + return isSetCommit(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof FinalizeWriteIdRequest) + return this.equals((FinalizeWriteIdRequest)that); + return false; + } + + public boolean equals(FinalizeWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + boolean this_present_commit = true; + boolean that_present_commit = true; + if (this_present_commit || that_present_commit) { + if (!(this_present_commit && that_present_commit)) + return false; + if (this.commit != that.commit) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if 
(present_tblName) + list.add(tblName); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + boolean present_commit = true; + list.add(present_commit); + if (present_commit) + list.add(commit); + + return list.hashCode(); + } + + @Override + public int compareTo(FinalizeWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetCommit()).compareTo(other.isSetCommit()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCommit()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.commit, other.commit); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + 
schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("FinalizeWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + if (!first) sb.append(", "); + sb.append("commit:"); + sb.append(this.commit); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + if (!isSetCommit()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'commit' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class FinalizeWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public FinalizeWriteIdRequestStandardScheme getScheme() { + return new FinalizeWriteIdRequestStandardScheme(); + } + } + + private static class FinalizeWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // COMMIT + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.commit = iprot.readBool(); + struct.setCommitIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(COMMIT_FIELD_DESC); + oprot.writeBool(struct.commit); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class FinalizeWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public FinalizeWriteIdRequestTupleScheme getScheme() { + return new FinalizeWriteIdRequestTupleScheme(); + } + } + + private static class FinalizeWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + 
oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + oprot.writeI64(struct.writeId); + oprot.writeBool(struct.commit); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + struct.commit = iprot.readBool(); + struct.setCommitIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java new file mode 100644 index 000000000000..8e8b504b9aac --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FinalizeWriteIdResult.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import 
java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class FinalizeWriteIdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FinalizeWriteIdResult"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new FinalizeWriteIdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new FinalizeWriteIdResultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FinalizeWriteIdResult.class, metaDataMap); + } + + public FinalizeWriteIdResult() { + } + + /** + * Performs a deep copy on other. + */ + public FinalizeWriteIdResult(FinalizeWriteIdResult other) { + } + + public FinalizeWriteIdResult deepCopy() { + return new FinalizeWriteIdResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof FinalizeWriteIdResult) + return this.equals((FinalizeWriteIdResult)that); + return false; + } + + public boolean equals(FinalizeWriteIdResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + 
@Override + public int compareTo(FinalizeWriteIdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("FinalizeWriteIdResult("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class FinalizeWriteIdResultStandardSchemeFactory implements SchemeFactory { + public FinalizeWriteIdResultStandardScheme getScheme() { + return new FinalizeWriteIdResultStandardScheme(); + } + } + + private static class FinalizeWriteIdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, FinalizeWriteIdResult 
struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class FinalizeWriteIdResultTupleSchemeFactory implements SchemeFactory { + public FinalizeWriteIdResultTupleScheme getScheme() { + return new FinalizeWriteIdResultTupleScheme(); + } + } + + private static class FinalizeWriteIdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, FinalizeWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index f427a3a393f9..49a1be25bf32 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // 
FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); - struct.functions = new ArrayList(_list634.size); - Function _elem635; - for (int _i636 = 0; _i636 < _list634.size; ++_i636) + org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); + struct.functions = new ArrayList(_list642.size); + Function _elem643; + for (int _i644 = 0; _i644 < _list642.size; ++_i644) { - _elem635 = new Function(); - _elem635.read(iprot); - struct.functions.add(_elem635); + _elem643 = new Function(); + _elem643.read(iprot); + struct.functions.add(_elem643); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter637 : struct.functions) + for (Function _iter645 : struct.functions) { - _iter637.write(oprot); + _iter645.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter638 : struct.functions) + for (Function _iter646 : struct.functions) { - _iter638.write(oprot); + _iter646.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list639.size); - Function _elem640; - for (int _i641 = 0; _i641 < _list639.size; ++_i641) + org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.functions = new ArrayList(_list647.size); + Function _elem648; + for (int _i649 = 0; _i649 < _list647.size; ++_i649) { - _elem640 = new Function(); - _elem640.read(iprot); - struct.functions.add(_elem640); + _elem648 = new Function(); + _elem648.read(iprot); + struct.functions.add(_elem648); } } struct.setFunctionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java new file mode 100644 index 000000000000..dab13fded3a7 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdRequest.java @@ -0,0 +1,490 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public 
class GetNextWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetNextWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetNextWriteIdRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdRequest.class, metaDataMap); + } + + public GetNextWriteIdRequest() { + } + + public GetNextWriteIdRequest( + String dbName, + String tblName) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetNextWriteIdRequest(GetNextWriteIdRequest other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + } + + public GetNextWriteIdRequest deepCopy() { + return new GetNextWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); 
+ } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetNextWriteIdRequest) + return this.equals((GetNextWriteIdRequest)that); + return false; + } + + public boolean equals(GetNextWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + return list.hashCode(); + } + + @Override + public int compareTo(GetNextWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetNextWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetNextWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public GetNextWriteIdRequestStandardScheme getScheme() { + return new GetNextWriteIdRequestStandardScheme(); + } + } + + private static class GetNextWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetNextWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public GetNextWriteIdRequestTupleScheme getScheme() { + return new GetNextWriteIdRequestTupleScheme(); + } + } + + private static class GetNextWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java new file mode 100644 index 000000000000..97ad28421c25 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetNextWriteIdResult.java @@ -0,0 +1,387 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT 
EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class GetNextWriteIdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetNextWriteIdResult"); + + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetNextWriteIdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetNextWriteIdResultTupleSchemeFactory()); + } + + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and 
manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + WRITE_ID((short)1, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetNextWriteIdResult.class, metaDataMap); + } + + public GetNextWriteIdResult() { + } + + public GetNextWriteIdResult( + long writeId) + { + this(); + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetNextWriteIdResult(GetNextWriteIdResult other) { + __isset_bitfield = other.__isset_bitfield; + this.writeId = other.writeId; + } + + public GetNextWriteIdResult deepCopy() { + return new GetNextWriteIdResult(this); + } + + @Override + public void clear() { + setWriteIdIsSet(false); + this.writeId = 0; + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if 
(that == null) + return false; + if (that instanceof GetNextWriteIdResult) + return this.equals((GetNextWriteIdResult)that); + return false; + } + + public boolean equals(GetNextWriteIdResult that) { + if (that == null) + return false; + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(GetNextWriteIdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetNextWriteIdResult("); + boolean first = true; + + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws 
org.apache.thrift.TException { + // check for required fields + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetNextWriteIdResultStandardSchemeFactory implements SchemeFactory { + public GetNextWriteIdResultStandardScheme getScheme() { + return new GetNextWriteIdResultStandardScheme(); + } + } + + private static class GetNextWriteIdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetNextWriteIdResultTupleSchemeFactory implements SchemeFactory { + public GetNextWriteIdResultTupleScheme getScheme() { + return new GetNextWriteIdResultTupleScheme(); + } + } + + private static class GetNextWriteIdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetNextWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java new file mode 100644 index 000000000000..90f103a4377d --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java @@ -0,0 +1,490 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import 
org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class GetValidWriteIdsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetValidWriteIdsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetValidWriteIdsRequestTupleSchemeFactory()); + } + + private String dbName; // required + private String tblName; // required + + /** The set of fields this struct contains, 
along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsRequest.class, metaDataMap); + } + + public GetValidWriteIdsRequest() { + } + + public GetValidWriteIdsRequest( + String dbName, + String tblName) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetValidWriteIdsRequest(GetValidWriteIdsRequest other) { + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + } + + public GetValidWriteIdsRequest deepCopy() { + return new GetValidWriteIdsRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new 
IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetValidWriteIdsRequest) + return this.equals((GetValidWriteIdsRequest)that); + return false; + } + + public boolean equals(GetValidWriteIdsRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + return list.hashCode(); + } + + @Override + public int compareTo(GetValidWriteIdsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; 
+ } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetValidWriteIdsRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetValidWriteIdsRequestStandardSchemeFactory implements SchemeFactory { + public GetValidWriteIdsRequestStandardScheme getScheme() { + return new GetValidWriteIdsRequestStandardScheme(); + } + } + + private static class GetValidWriteIdsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + 
iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetValidWriteIdsRequestTupleSchemeFactory implements SchemeFactory { + public GetValidWriteIdsRequestTupleScheme getScheme() { + return new GetValidWriteIdsRequestTupleScheme(); + } + } + + private static class GetValidWriteIdsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java new file mode 100644 index 000000000000..a51f321fa2a3 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResult.java @@ -0,0 +1,740 @@ +/** + * Autogenerated by Thrift Compiler 
(0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class GetValidWriteIdsResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsResult"); + + private static final org.apache.thrift.protocol.TField LOW_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("lowWatermarkId", org.apache.thrift.protocol.TType.I64, (short)1); + private static final org.apache.thrift.protocol.TField HIGH_WATERMARK_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("highWatermarkId", org.apache.thrift.protocol.TType.I64, (short)2); + private static final org.apache.thrift.protocol.TField ARE_IDS_VALID_FIELD_DESC = new org.apache.thrift.protocol.TField("areIdsValid", 
org.apache.thrift.protocol.TType.BOOL, (short)3); + private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.LIST, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetValidWriteIdsResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetValidWriteIdsResultTupleSchemeFactory()); + } + + private long lowWatermarkId; // required + private long highWatermarkId; // required + private boolean areIdsValid; // optional + private List ids; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + LOW_WATERMARK_ID((short)1, "lowWatermarkId"), + HIGH_WATERMARK_ID((short)2, "highWatermarkId"), + ARE_IDS_VALID((short)3, "areIdsValid"), + IDS((short)4, "ids"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // LOW_WATERMARK_ID + return LOW_WATERMARK_ID; + case 2: // HIGH_WATERMARK_ID + return HIGH_WATERMARK_ID; + case 3: // ARE_IDS_VALID + return ARE_IDS_VALID; + case 4: // IDS + return IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __LOWWATERMARKID_ISSET_ID = 0; + private static final int __HIGHWATERMARKID_ISSET_ID = 1; + private static final int __AREIDSVALID_ISSET_ID = 2; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.ARE_IDS_VALID,_Fields.IDS}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.LOW_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("lowWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.HIGH_WATERMARK_ID, new org.apache.thrift.meta_data.FieldMetaData("highWatermarkId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.ARE_IDS_VALID, new org.apache.thrift.meta_data.FieldMetaData("areIdsValid", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); 
+ metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsResult.class, metaDataMap); + } + + public GetValidWriteIdsResult() { + } + + public GetValidWriteIdsResult( + long lowWatermarkId, + long highWatermarkId) + { + this(); + this.lowWatermarkId = lowWatermarkId; + setLowWatermarkIdIsSet(true); + this.highWatermarkId = highWatermarkId; + setHighWatermarkIdIsSet(true); + } + + /** + * Performs a deep copy on other. + */ + public GetValidWriteIdsResult(GetValidWriteIdsResult other) { + __isset_bitfield = other.__isset_bitfield; + this.lowWatermarkId = other.lowWatermarkId; + this.highWatermarkId = other.highWatermarkId; + this.areIdsValid = other.areIdsValid; + if (other.isSetIds()) { + List __this__ids = new ArrayList(other.ids); + this.ids = __this__ids; + } + } + + public GetValidWriteIdsResult deepCopy() { + return new GetValidWriteIdsResult(this); + } + + @Override + public void clear() { + setLowWatermarkIdIsSet(false); + this.lowWatermarkId = 0; + setHighWatermarkIdIsSet(false); + this.highWatermarkId = 0; + setAreIdsValidIsSet(false); + this.areIdsValid = false; + this.ids = null; + } + + public long getLowWatermarkId() { + return this.lowWatermarkId; + } + + public void setLowWatermarkId(long lowWatermarkId) { + this.lowWatermarkId = lowWatermarkId; + setLowWatermarkIdIsSet(true); + } + + public void unsetLowWatermarkId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID); + } + + /** Returns true if field lowWatermarkId is set (has been assigned a value) and false otherwise */ + public boolean isSetLowWatermarkId() { + return EncodingUtils.testBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID); + } + + public void setLowWatermarkIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWWATERMARKID_ISSET_ID, value); + } + + public long getHighWatermarkId() { + return this.highWatermarkId; + } + + public void 
setHighWatermarkId(long highWatermarkId) { + this.highWatermarkId = highWatermarkId; + setHighWatermarkIdIsSet(true); + } + + public void unsetHighWatermarkId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID); + } + + /** Returns true if field highWatermarkId is set (has been assigned a value) and false otherwise */ + public boolean isSetHighWatermarkId() { + return EncodingUtils.testBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID); + } + + public void setHighWatermarkIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHWATERMARKID_ISSET_ID, value); + } + + public boolean isAreIdsValid() { + return this.areIdsValid; + } + + public void setAreIdsValid(boolean areIdsValid) { + this.areIdsValid = areIdsValid; + setAreIdsValidIsSet(true); + } + + public void unsetAreIdsValid() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AREIDSVALID_ISSET_ID); + } + + /** Returns true if field areIdsValid is set (has been assigned a value) and false otherwise */ + public boolean isSetAreIdsValid() { + return EncodingUtils.testBit(__isset_bitfield, __AREIDSVALID_ISSET_ID); + } + + public void setAreIdsValidIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AREIDSVALID_ISSET_ID, value); + } + + public int getIdsSize() { + return (this.ids == null) ? 0 : this.ids.size(); + } + + public java.util.Iterator getIdsIterator() { + return (this.ids == null) ? 
null : this.ids.iterator(); + } + + public void addToIds(long elem) { + if (this.ids == null) { + this.ids = new ArrayList(); + } + this.ids.add(elem); + } + + public List getIds() { + return this.ids; + } + + public void setIds(List ids) { + this.ids = ids; + } + + public void unsetIds() { + this.ids = null; + } + + /** Returns true if field ids is set (has been assigned a value) and false otherwise */ + public boolean isSetIds() { + return this.ids != null; + } + + public void setIdsIsSet(boolean value) { + if (!value) { + this.ids = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case LOW_WATERMARK_ID: + if (value == null) { + unsetLowWatermarkId(); + } else { + setLowWatermarkId((Long)value); + } + break; + + case HIGH_WATERMARK_ID: + if (value == null) { + unsetHighWatermarkId(); + } else { + setHighWatermarkId((Long)value); + } + break; + + case ARE_IDS_VALID: + if (value == null) { + unsetAreIdsValid(); + } else { + setAreIdsValid((Boolean)value); + } + break; + + case IDS: + if (value == null) { + unsetIds(); + } else { + setIds((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case LOW_WATERMARK_ID: + return getLowWatermarkId(); + + case HIGH_WATERMARK_ID: + return getHighWatermarkId(); + + case ARE_IDS_VALID: + return isAreIdsValid(); + + case IDS: + return getIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case LOW_WATERMARK_ID: + return isSetLowWatermarkId(); + case HIGH_WATERMARK_ID: + return isSetHighWatermarkId(); + case ARE_IDS_VALID: + return isSetAreIdsValid(); + case IDS: + return isSetIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that 
== null) + return false; + if (that instanceof GetValidWriteIdsResult) + return this.equals((GetValidWriteIdsResult)that); + return false; + } + + public boolean equals(GetValidWriteIdsResult that) { + if (that == null) + return false; + + boolean this_present_lowWatermarkId = true; + boolean that_present_lowWatermarkId = true; + if (this_present_lowWatermarkId || that_present_lowWatermarkId) { + if (!(this_present_lowWatermarkId && that_present_lowWatermarkId)) + return false; + if (this.lowWatermarkId != that.lowWatermarkId) + return false; + } + + boolean this_present_highWatermarkId = true; + boolean that_present_highWatermarkId = true; + if (this_present_highWatermarkId || that_present_highWatermarkId) { + if (!(this_present_highWatermarkId && that_present_highWatermarkId)) + return false; + if (this.highWatermarkId != that.highWatermarkId) + return false; + } + + boolean this_present_areIdsValid = true && this.isSetAreIdsValid(); + boolean that_present_areIdsValid = true && that.isSetAreIdsValid(); + if (this_present_areIdsValid || that_present_areIdsValid) { + if (!(this_present_areIdsValid && that_present_areIdsValid)) + return false; + if (this.areIdsValid != that.areIdsValid) + return false; + } + + boolean this_present_ids = true && this.isSetIds(); + boolean that_present_ids = true && that.isSetIds(); + if (this_present_ids || that_present_ids) { + if (!(this_present_ids && that_present_ids)) + return false; + if (!this.ids.equals(that.ids)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_lowWatermarkId = true; + list.add(present_lowWatermarkId); + if (present_lowWatermarkId) + list.add(lowWatermarkId); + + boolean present_highWatermarkId = true; + list.add(present_highWatermarkId); + if (present_highWatermarkId) + list.add(highWatermarkId); + + boolean present_areIdsValid = true && (isSetAreIdsValid()); + list.add(present_areIdsValid); + if (present_areIdsValid) + 
list.add(areIdsValid); + + boolean present_ids = true && (isSetIds()); + list.add(present_ids); + if (present_ids) + list.add(ids); + + return list.hashCode(); + } + + @Override + public int compareTo(GetValidWriteIdsResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetLowWatermarkId()).compareTo(other.isSetLowWatermarkId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetLowWatermarkId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowWatermarkId, other.lowWatermarkId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetHighWatermarkId()).compareTo(other.isSetHighWatermarkId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetHighWatermarkId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highWatermarkId, other.highWatermarkId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetAreIdsValid()).compareTo(other.isSetAreIdsValid()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetAreIdsValid()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.areIdsValid, other.areIdsValid); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); 
+ } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetValidWriteIdsResult("); + boolean first = true; + + sb.append("lowWatermarkId:"); + sb.append(this.lowWatermarkId); + first = false; + if (!first) sb.append(", "); + sb.append("highWatermarkId:"); + sb.append(this.highWatermarkId); + first = false; + if (isSetAreIdsValid()) { + if (!first) sb.append(", "); + sb.append("areIdsValid:"); + sb.append(this.areIdsValid); + first = false; + } + if (isSetIds()) { + if (!first) sb.append(", "); + sb.append("ids:"); + if (this.ids == null) { + sb.append("null"); + } else { + sb.append(this.ids); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetLowWatermarkId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'lowWatermarkId' is unset! Struct:" + toString()); + } + + if (!isSetHighWatermarkId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'highWatermarkId' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetValidWriteIdsResultStandardSchemeFactory implements SchemeFactory { + public GetValidWriteIdsResultStandardScheme getScheme() { + return new GetValidWriteIdsResultStandardScheme(); + } + } + + private static class GetValidWriteIdsResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // LOW_WATERMARK_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.lowWatermarkId = iprot.readI64(); + struct.setLowWatermarkIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // HIGH_WATERMARK_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.highWatermarkId = iprot.readI64(); + struct.setHighWatermarkIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // ARE_IDS_VALID + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.areIdsValid = iprot.readBool(); + struct.setAreIdsValidIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // IDS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); + struct.ids = new ArrayList(_list634.size); + long _elem635; + for (int _i636 = 0; _i636 < _list634.size; 
++_i636) + { + _elem635 = iprot.readI64(); + struct.ids.add(_elem635); + } + iprot.readListEnd(); + } + struct.setIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(LOW_WATERMARK_ID_FIELD_DESC); + oprot.writeI64(struct.lowWatermarkId); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(HIGH_WATERMARK_ID_FIELD_DESC); + oprot.writeI64(struct.highWatermarkId); + oprot.writeFieldEnd(); + if (struct.isSetAreIdsValid()) { + oprot.writeFieldBegin(ARE_IDS_VALID_FIELD_DESC); + oprot.writeBool(struct.areIdsValid); + oprot.writeFieldEnd(); + } + if (struct.ids != null) { + if (struct.isSetIds()) { + oprot.writeFieldBegin(IDS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.ids.size())); + for (long _iter637 : struct.ids) + { + oprot.writeI64(_iter637); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetValidWriteIdsResultTupleSchemeFactory implements SchemeFactory { + public GetValidWriteIdsResultTupleScheme getScheme() { + return new GetValidWriteIdsResultTupleScheme(); + } + } + + private static class GetValidWriteIdsResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeI64(struct.lowWatermarkId); + oprot.writeI64(struct.highWatermarkId); + BitSet optionals = new 
BitSet(); + if (struct.isSetAreIdsValid()) { + optionals.set(0); + } + if (struct.isSetIds()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetAreIdsValid()) { + oprot.writeBool(struct.areIdsValid); + } + if (struct.isSetIds()) { + { + oprot.writeI32(struct.ids.size()); + for (long _iter638 : struct.ids) + { + oprot.writeI64(_iter638); + } + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.lowWatermarkId = iprot.readI64(); + struct.setLowWatermarkIdIsSet(true); + struct.highWatermarkId = iprot.readI64(); + struct.setHighWatermarkIdIsSet(true); + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.areIdsValid = iprot.readBool(); + struct.setAreIdsValidIsSet(true); + } + if (incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.ids = new ArrayList(_list639.size); + long _elem640; + for (int _i641 = 0; _i641 < _list639.size; ++_i641) + { + _elem640 = iprot.readI64(); + struct.ids.add(_elem640); + } + } + struct.setIdsIsSet(true); + } + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java new file mode 100644 index 000000000000..0c1849c5f2c9 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdRequest.java @@ -0,0 +1,589 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import 
org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class HeartbeatWriteIdRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdRequest"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new HeartbeatWriteIdRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new HeartbeatWriteIdRequestTupleSchemeFactory()); + 
} + + private String dbName; // required + private String tblName; // required + private long writeId; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "dbName"), + TBL_NAME((short)2, "tblName"), + WRITE_ID((short)3, "writeId"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // WRITE_ID + return WRITE_ID; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __WRITEID_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdRequest.class, metaDataMap); + } + + public HeartbeatWriteIdRequest() { + } + + public HeartbeatWriteIdRequest( + String dbName, + String tblName, + long writeId) + { + this(); + this.dbName = dbName; + this.tblName = tblName; + this.writeId = writeId; + setWriteIdIsSet(true); + } + + /** + * Performs a deep copy on other. 
+ */ + public HeartbeatWriteIdRequest(HeartbeatWriteIdRequest other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDbName()) { + this.dbName = other.dbName; + } + if (other.isSetTblName()) { + this.tblName = other.tblName; + } + this.writeId = other.writeId; + } + + public HeartbeatWriteIdRequest deepCopy() { + return new HeartbeatWriteIdRequest(this); + } + + @Override + public void clear() { + this.dbName = null; + this.tblName = null; + setWriteIdIsSet(false); + this.writeId = 0; + } + + public String getDbName() { + return this.dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public void unsetDbName() { + this.dbName = null; + } + + /** Returns true if field dbName is set (has been assigned a value) and false otherwise */ + public boolean isSetDbName() { + return this.dbName != null; + } + + public void setDbNameIsSet(boolean value) { + if (!value) { + this.dbName = null; + } + } + + public String getTblName() { + return this.tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public void unsetTblName() { + this.tblName = null; + } + + /** Returns true if field tblName is set (has been assigned a value) and false otherwise */ + public boolean isSetTblName() { + return this.tblName != null; + } + + public void setTblNameIsSet(boolean value) { + if (!value) { + this.tblName = null; + } + } + + public long getWriteId() { + return this.writeId; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + setWriteIdIsSet(true); + } + + public void unsetWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + /** Returns true if field writeId is set (has been assigned a value) and false otherwise */ + public boolean isSetWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID); + } + + public void setWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, 
__WRITEID_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDbName(); + } else { + setDbName((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTblName(); + } else { + setTblName((String)value); + } + break; + + case WRITE_ID: + if (value == null) { + unsetWriteId(); + } else { + setWriteId((Long)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDbName(); + + case TBL_NAME: + return getTblName(); + + case WRITE_ID: + return getWriteId(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDbName(); + case TBL_NAME: + return isSetTblName(); + case WRITE_ID: + return isSetWriteId(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HeartbeatWriteIdRequest) + return this.equals((HeartbeatWriteIdRequest)that); + return false; + } + + public boolean equals(HeartbeatWriteIdRequest that) { + if (that == null) + return false; + + boolean this_present_dbName = true && this.isSetDbName(); + boolean that_present_dbName = true && that.isSetDbName(); + if (this_present_dbName || that_present_dbName) { + if (!(this_present_dbName && that_present_dbName)) + return false; + if (!this.dbName.equals(that.dbName)) + return false; + } + + boolean this_present_tblName = true && this.isSetTblName(); + boolean that_present_tblName = true && that.isSetTblName(); + if (this_present_tblName || that_present_tblName) { + if (!(this_present_tblName && that_present_tblName)) + return false; + if (!this.tblName.equals(that.tblName)) 
+ return false; + } + + boolean this_present_writeId = true; + boolean that_present_writeId = true; + if (this_present_writeId || that_present_writeId) { + if (!(this_present_writeId && that_present_writeId)) + return false; + if (this.writeId != that.writeId) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_dbName = true && (isSetDbName()); + list.add(present_dbName); + if (present_dbName) + list.add(dbName); + + boolean present_tblName = true && (isSetTblName()); + list.add(present_tblName); + if (present_tblName) + list.add(tblName); + + boolean present_writeId = true; + list.add(present_writeId); + if (present_writeId) + list.add(writeId); + + return list.hashCode(); + } + + @Override + public int compareTo(HeartbeatWriteIdRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDbName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTblName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + 
return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("HeartbeatWriteIdRequest("); + boolean first = true; + + sb.append("dbName:"); + if (this.dbName == null) { + sb.append("null"); + } else { + sb.append(this.dbName); + } + first = false; + if (!first) sb.append(", "); + sb.append("tblName:"); + if (this.tblName == null) { + sb.append("null"); + } else { + sb.append(this.tblName); + } + first = false; + if (!first) sb.append(", "); + sb.append("writeId:"); + sb.append(this.writeId); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetDbName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString()); + } + + if (!isSetTblName()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString()); + } + + if (!isSetWriteId()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeId' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class HeartbeatWriteIdRequestStandardSchemeFactory implements SchemeFactory { + public HeartbeatWriteIdRequestStandardScheme getScheme() { + return new HeartbeatWriteIdRequestStandardScheme(); + } + } + + private static class HeartbeatWriteIdRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.dbName = iprot.readString(); + struct.setDbNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.dbName != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.dbName); + oprot.writeFieldEnd(); + } + if (struct.tblName != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tblName); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.writeId); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class HeartbeatWriteIdRequestTupleSchemeFactory implements SchemeFactory { + public HeartbeatWriteIdRequestTupleScheme getScheme() { + return new HeartbeatWriteIdRequestTupleScheme(); + } + } + + private static class HeartbeatWriteIdRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + oprot.writeString(struct.dbName); + oprot.writeString(struct.tblName); + oprot.writeI64(struct.writeId); + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.dbName = iprot.readString(); + 
struct.setDbNameIsSet(true); + struct.tblName = iprot.readString(); + struct.setTblNameIsSet(true); + struct.writeId = iprot.readI64(); + struct.setWriteIdIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java new file mode 100644 index 000000000000..ae6f25e815b0 --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatWriteIdResult.java @@ -0,0 +1,283 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class HeartbeatWriteIdResult implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct 
STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatWriteIdResult"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new HeartbeatWriteIdResultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new HeartbeatWriteIdResultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatWriteIdResult.class, metaDataMap); + } + + public HeartbeatWriteIdResult() { + } + + /** + * Performs a deep copy on other. + */ + public HeartbeatWriteIdResult(HeartbeatWriteIdResult other) { + } + + public HeartbeatWriteIdResult deepCopy() { + return new HeartbeatWriteIdResult(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HeartbeatWriteIdResult) + return this.equals((HeartbeatWriteIdResult)that); + return false; + } + + public boolean equals(HeartbeatWriteIdResult that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); 
+ } + + @Override + public int compareTo(HeartbeatWriteIdResult other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("HeartbeatWriteIdResult("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class HeartbeatWriteIdResultStandardSchemeFactory implements SchemeFactory { + public HeartbeatWriteIdResultStandardScheme getScheme() { + return new HeartbeatWriteIdResultStandardScheme(); + } + } + + private static class HeartbeatWriteIdResultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, 
HeartbeatWriteIdResult struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class HeartbeatWriteIdResultTupleSchemeFactory implements SchemeFactory { + public HeartbeatWriteIdResultTupleScheme getScheme() { + return new HeartbeatWriteIdResultTupleScheme(); + } + } + + private static class HeartbeatWriteIdResultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatWriteIdResult struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 5d683fb615e2..9c228c749e34 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -52,6 +52,8 @@ public class Table implements org.apache.thrift.TBase, jav private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = 
new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13); private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); + private static final org.apache.thrift.protocol.TField MM_NEXT_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmNextWriteId", org.apache.thrift.protocol.TType.I64, (short)15); + private static final org.apache.thrift.protocol.TField MM_WATERMARK_WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("mmWatermarkWriteId", org.apache.thrift.protocol.TType.I64, (short)16); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -73,6 +75,8 @@ public class Table implements org.apache.thrift.TBase, jav private String tableType; // required private PrincipalPrivilegeSet privileges; // optional private boolean temporary; // optional + private long mmNextWriteId; // optional + private long mmWatermarkWriteId; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -89,7 +93,9 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"), TABLE_TYPE((short)12, "tableType"), PRIVILEGES((short)13, "privileges"), - TEMPORARY((short)14, "temporary"); + TEMPORARY((short)14, "temporary"), + MM_NEXT_WRITE_ID((short)15, "mmNextWriteId"), + MM_WATERMARK_WRITE_ID((short)16, "mmWatermarkWriteId"); private static final Map byName = new HashMap(); @@ -132,6 +138,10 @@ public static _Fields findByThriftId(int fieldId) { return PRIVILEGES; case 14: // TEMPORARY return TEMPORARY; + case 15: // MM_NEXT_WRITE_ID + return MM_NEXT_WRITE_ID; + case 16: // MM_WATERMARK_WRITE_ID + return MM_WATERMARK_WRITE_ID; default: return null; } @@ -176,8 +186,10 @@ public String getFieldName() { private static final int __LASTACCESSTIME_ISSET_ID = 1; private static final int __RETENTION_ISSET_ID = 2; private static final int __TEMPORARY_ISSET_ID = 3; + private static final int __MMNEXTWRITEID_ISSET_ID = 4; + private static final int __MMWATERMARKWRITEID_ISSET_ID = 5; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY}; + private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.MM_NEXT_WRITE_ID,_Fields.MM_WATERMARK_WRITE_ID}; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -212,6 +224,10 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL, new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.MM_NEXT_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmNextWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); + tmpMap.put(_Fields.MM_WATERMARK_WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("mmWatermarkWriteId", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -297,6 +313,8 @@ public Table(Table other) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } this.temporary = other.temporary; + this.mmNextWriteId = other.mmNextWriteId; + this.mmWatermarkWriteId = other.mmWatermarkWriteId; } public Table deepCopy() { @@ -323,6 +341,10 @@ public void clear() { this.privileges = null; this.temporary = false; + setMmNextWriteIdIsSet(false); + this.mmNextWriteId = 0; + setMmWatermarkWriteIdIsSet(false); + this.mmWatermarkWriteId = 0; } public String getTableName() { @@ -669,6 +691,50 @@ public void setTemporaryIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value); } + public long getMmNextWriteId() { + return this.mmNextWriteId; + } + + public void setMmNextWriteId(long mmNextWriteId) { + this.mmNextWriteId = mmNextWriteId; + setMmNextWriteIdIsSet(true); + } + + public void unsetMmNextWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID); + } + + /** Returns true if field mmNextWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMmNextWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID); + } + + public void 
setMmNextWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMNEXTWRITEID_ISSET_ID, value); + } + + public long getMmWatermarkWriteId() { + return this.mmWatermarkWriteId; + } + + public void setMmWatermarkWriteId(long mmWatermarkWriteId) { + this.mmWatermarkWriteId = mmWatermarkWriteId; + setMmWatermarkWriteIdIsSet(true); + } + + public void unsetMmWatermarkWriteId() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID); + } + + /** Returns true if field mmWatermarkWriteId is set (has been assigned a value) and false otherwise */ + public boolean isSetMmWatermarkWriteId() { + return EncodingUtils.testBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID); + } + + public void setMmWatermarkWriteIdIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MMWATERMARKWRITEID_ISSET_ID, value); + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -783,6 +849,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case MM_NEXT_WRITE_ID: + if (value == null) { + unsetMmNextWriteId(); + } else { + setMmNextWriteId((Long)value); + } + break; + + case MM_WATERMARK_WRITE_ID: + if (value == null) { + unsetMmWatermarkWriteId(); + } else { + setMmWatermarkWriteId((Long)value); + } + break; + } } @@ -830,6 +912,12 @@ public Object getFieldValue(_Fields field) { case TEMPORARY: return isTemporary(); + case MM_NEXT_WRITE_ID: + return getMmNextWriteId(); + + case MM_WATERMARK_WRITE_ID: + return getMmWatermarkWriteId(); + } throw new IllegalStateException(); } @@ -869,6 +957,10 @@ public boolean isSet(_Fields field) { return isSetPrivileges(); case TEMPORARY: return isSetTemporary(); + case MM_NEXT_WRITE_ID: + return isSetMmNextWriteId(); + case MM_WATERMARK_WRITE_ID: + return isSetMmWatermarkWriteId(); } throw new IllegalStateException(); } @@ -1012,6 +1104,24 @@ public boolean equals(Table that) { return false; } 
+ boolean this_present_mmNextWriteId = true && this.isSetMmNextWriteId(); + boolean that_present_mmNextWriteId = true && that.isSetMmNextWriteId(); + if (this_present_mmNextWriteId || that_present_mmNextWriteId) { + if (!(this_present_mmNextWriteId && that_present_mmNextWriteId)) + return false; + if (this.mmNextWriteId != that.mmNextWriteId) + return false; + } + + boolean this_present_mmWatermarkWriteId = true && this.isSetMmWatermarkWriteId(); + boolean that_present_mmWatermarkWriteId = true && that.isSetMmWatermarkWriteId(); + if (this_present_mmWatermarkWriteId || that_present_mmWatermarkWriteId) { + if (!(this_present_mmWatermarkWriteId && that_present_mmWatermarkWriteId)) + return false; + if (this.mmWatermarkWriteId != that.mmWatermarkWriteId) + return false; + } + return true; } @@ -1089,6 +1199,16 @@ public int hashCode() { if (present_temporary) list.add(temporary); + boolean present_mmNextWriteId = true && (isSetMmNextWriteId()); + list.add(present_mmNextWriteId); + if (present_mmNextWriteId) + list.add(mmNextWriteId); + + boolean present_mmWatermarkWriteId = true && (isSetMmWatermarkWriteId()); + list.add(present_mmWatermarkWriteId); + if (present_mmWatermarkWriteId) + list.add(mmWatermarkWriteId); + return list.hashCode(); } @@ -1240,6 +1360,26 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetMmNextWriteId()).compareTo(other.isSetMmNextWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMmNextWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmNextWriteId, other.mmNextWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMmWatermarkWriteId()).compareTo(other.isSetMmWatermarkWriteId()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMmWatermarkWriteId()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mmWatermarkWriteId, 
other.mmWatermarkWriteId); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1359,6 +1499,18 @@ public String toString() { sb.append(this.temporary); first = false; } + if (isSetMmNextWriteId()) { + if (!first) sb.append(", "); + sb.append("mmNextWriteId:"); + sb.append(this.mmNextWriteId); + first = false; + } + if (isSetMmWatermarkWriteId()) { + if (!first) sb.append(", "); + sb.append("mmWatermarkWriteId:"); + sb.append(this.mmWatermarkWriteId); + first = false; + } sb.append(")"); return sb.toString(); } @@ -1547,6 +1699,22 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // MM_NEXT_WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.mmNextWriteId = iprot.readI64(); + struct.setMmNextWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 16: // MM_WATERMARK_WRITE_ID + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.mmWatermarkWriteId = iprot.readI64(); + struct.setMmWatermarkWriteIdIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1641,6 +1809,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeBool(struct.temporary); oprot.writeFieldEnd(); } + if (struct.isSetMmNextWriteId()) { + oprot.writeFieldBegin(MM_NEXT_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.mmNextWriteId); + oprot.writeFieldEnd(); + } + if (struct.isSetMmWatermarkWriteId()) { + oprot.writeFieldBegin(MM_WATERMARK_WRITE_ID_FIELD_DESC); + oprot.writeI64(struct.mmWatermarkWriteId); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1701,7 +1879,13 @@ public void 
write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetMmNextWriteId()) { + optionals.set(14); + } + if (struct.isSetMmWatermarkWriteId()) { + optionals.set(15); + } + oprot.writeBitSet(optionals, 16); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ -1757,12 +1941,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { oprot.writeBool(struct.temporary); } + if (struct.isSetMmNextWriteId()) { + oprot.writeI64(struct.mmNextWriteId); + } + if (struct.isSetMmWatermarkWriteId()) { + oprot.writeI64(struct.mmWatermarkWriteId); + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(16); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1842,6 +2032,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.temporary = iprot.readBool(); struct.setTemporaryIsSet(true); } + if (incoming.get(14)) { + struct.mmNextWriteId = iprot.readI64(); + struct.setMmNextWriteIdIsSet(true); + } + if (incoming.get(15)) { + struct.mmWatermarkWriteId = iprot.readI64(); + struct.setMmWatermarkWriteIdIsSet(true); + } } } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index d827d6c733cd..6fdd29a8dcbf 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -342,6 +342,14 @@ public interface 
Iface extends com.facebook.fb303.FacebookService.Iface { public CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req) throws org.apache.thrift.TException; + public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException; + + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException; + + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException; + + public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException; + } public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIface { @@ -646,6 +654,14 @@ public interface AsyncIface extends com.facebook.fb303.FacebookService .AsyncIfa public void cache_file_metadata(CacheFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + } public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface { @@ -4995,6 +5011,98 @@ public CacheFileMetadataResult recv_cache_file_metadata() throws org.apache.thri throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result"); } + public 
GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException + { + send_get_next_write_id(req); + return recv_get_next_write_id(); + } + + public void send_get_next_write_id(GetNextWriteIdRequest req) throws org.apache.thrift.TException + { + get_next_write_id_args args = new get_next_write_id_args(); + args.setReq(req); + sendBase("get_next_write_id", args); + } + + public GetNextWriteIdResult recv_get_next_write_id() throws org.apache.thrift.TException + { + get_next_write_id_result result = new get_next_write_id_result(); + receiveBase(result, "get_next_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result"); + } + + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException + { + send_finalize_write_id(req); + return recv_finalize_write_id(); + } + + public void send_finalize_write_id(FinalizeWriteIdRequest req) throws org.apache.thrift.TException + { + finalize_write_id_args args = new finalize_write_id_args(); + args.setReq(req); + sendBase("finalize_write_id", args); + } + + public FinalizeWriteIdResult recv_finalize_write_id() throws org.apache.thrift.TException + { + finalize_write_id_result result = new finalize_write_id_result(); + receiveBase(result, "finalize_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result"); + } + + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException + { + send_heartbeat_write_id(req); + return recv_heartbeat_write_id(); + } + + public void send_heartbeat_write_id(HeartbeatWriteIdRequest req) throws org.apache.thrift.TException + { + 
heartbeat_write_id_args args = new heartbeat_write_id_args(); + args.setReq(req); + sendBase("heartbeat_write_id", args); + } + + public HeartbeatWriteIdResult recv_heartbeat_write_id() throws org.apache.thrift.TException + { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + receiveBase(result, "heartbeat_write_id"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result"); + } + + public GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException + { + send_get_valid_write_ids(req); + return recv_get_valid_write_ids(); + } + + public void send_get_valid_write_ids(GetValidWriteIdsRequest req) throws org.apache.thrift.TException + { + get_valid_write_ids_args args = new get_valid_write_ids_args(); + args.setReq(req); + sendBase("get_valid_write_ids", args); + } + + public GetValidWriteIdsResult recv_get_valid_write_ids() throws org.apache.thrift.TException + { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + receiveBase(result, "get_valid_write_ids"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result"); + } + } public static class AsyncClient extends com.facebook.fb303.FacebookService.AsyncClient implements AsyncIface { public static class Factory implements org.apache.thrift.async.TAsyncClientFactory { @@ -10302,6 +10410,134 @@ public CacheFileMetadataResult getResult() throws org.apache.thrift.TException { } } + public void get_next_write_id(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_next_write_id_call method_call = new get_next_write_id_call(req, 
resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_next_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetNextWriteIdRequest req; + public get_next_write_id_call(GetNextWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_next_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_next_write_id_args args = new get_next_write_id_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetNextWriteIdResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_next_write_id(); + } + } + + public void finalize_write_id(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + finalize_write_id_call method_call = new finalize_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public 
static class finalize_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private FinalizeWriteIdRequest req; + public finalize_write_id_call(FinalizeWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("finalize_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + finalize_write_id_args args = new finalize_write_id_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public FinalizeWriteIdResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_finalize_write_id(); + } + } + + public void heartbeat_write_id(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + heartbeat_write_id_call method_call = new heartbeat_write_id_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class heartbeat_write_id_call extends org.apache.thrift.async.TAsyncMethodCall { + private HeartbeatWriteIdRequest req; + public 
heartbeat_write_id_call(HeartbeatWriteIdRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("heartbeat_write_id", org.apache.thrift.protocol.TMessageType.CALL, 0)); + heartbeat_write_id_args args = new heartbeat_write_id_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public HeartbeatWriteIdResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_heartbeat_write_id(); + } + } + + public void get_valid_write_ids(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_valid_write_ids_call method_call = new get_valid_write_ids_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_valid_write_ids_call extends org.apache.thrift.async.TAsyncMethodCall { + private GetValidWriteIdsRequest req; + public get_valid_write_ids_call(GetValidWriteIdsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_valid_write_ids", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_valid_write_ids_args args = new get_valid_write_ids_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public GetValidWriteIdsResult getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_valid_write_ids(); + } + } + } public static class Processor extends com.facebook.fb303.FacebookService.Processor implements org.apache.thrift.TProcessor { @@ -10465,6 +10701,10 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_next_write_id() { + super("get_next_write_id"); + } + + public get_next_write_id_args getEmptyArgsInstance() { + return new get_next_write_id_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_next_write_id_result getResult(I iface, get_next_write_id_args args) throws org.apache.thrift.TException { + get_next_write_id_result result = new get_next_write_id_result(); + result.success = iface.get_next_write_id(args.req); + return result; + } + } + + public static class finalize_write_id 
extends org.apache.thrift.ProcessFunction { + public finalize_write_id() { + super("finalize_write_id"); + } + + public finalize_write_id_args getEmptyArgsInstance() { + return new finalize_write_id_args(); + } + + protected boolean isOneway() { + return false; + } + + public finalize_write_id_result getResult(I iface, finalize_write_id_args args) throws org.apache.thrift.TException { + finalize_write_id_result result = new finalize_write_id_result(); + result.success = iface.finalize_write_id(args.req); + return result; + } + } + + public static class heartbeat_write_id extends org.apache.thrift.ProcessFunction { + public heartbeat_write_id() { + super("heartbeat_write_id"); + } + + public heartbeat_write_id_args getEmptyArgsInstance() { + return new heartbeat_write_id_args(); + } + + protected boolean isOneway() { + return false; + } + + public heartbeat_write_id_result getResult(I iface, heartbeat_write_id_args args) throws org.apache.thrift.TException { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + result.success = iface.heartbeat_write_id(args.req); + return result; + } + } + + public static class get_valid_write_ids extends org.apache.thrift.ProcessFunction { + public get_valid_write_ids() { + super("get_valid_write_ids"); + } + + public get_valid_write_ids_args getEmptyArgsInstance() { + return new get_valid_write_ids_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_valid_write_ids_result getResult(I iface, get_valid_write_ids_args args) throws org.apache.thrift.TException { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + result.success = iface.get_valid_write_ids(args.req); + return result; + } + } + } public static class AsyncProcessor extends com.facebook.fb303.FacebookService.AsyncProcessor { @@ -14461,6 +14781,10 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public get_next_write_id() { + super("get_next_write_id"); + } + + 
public get_next_write_id_args getEmptyArgsInstance() { + return new get_next_write_id_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetNextWriteIdResult o) { + get_next_write_id_result result = new get_next_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_next_write_id_result result = new get_next_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_next_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_next_write_id(args.req,resultHandler); + } + } + + public static class finalize_write_id extends org.apache.thrift.AsyncProcessFunction { + public finalize_write_id() { + super("finalize_write_id"); + } + + public finalize_write_id_args getEmptyArgsInstance() { + return new finalize_write_id_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void 
onComplete(FinalizeWriteIdResult o) { + finalize_write_id_result result = new finalize_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + finalize_write_id_result result = new finalize_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, finalize_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.finalize_write_id(args.req,resultHandler); + } + } + + public static class heartbeat_write_id extends org.apache.thrift.AsyncProcessFunction { + public heartbeat_write_id() { + super("heartbeat_write_id"); + } + + public heartbeat_write_id_args getEmptyArgsInstance() { + return new heartbeat_write_id_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(HeartbeatWriteIdResult o) { + heartbeat_write_id_result result = new heartbeat_write_id_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); 
+ } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + heartbeat_write_id_result result = new heartbeat_write_id_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, heartbeat_write_id_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.heartbeat_write_id(args.req,resultHandler); + } + } + + public static class get_valid_write_ids extends org.apache.thrift.AsyncProcessFunction { + public get_valid_write_ids() { + super("get_valid_write_ids"); + } + + public get_valid_write_ids_args getEmptyArgsInstance() { + return new get_valid_write_ids_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(GetValidWriteIdsResult o) { + get_valid_write_ids_result result = new get_valid_write_ids_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_valid_write_ids_result result = new get_valid_write_ids_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = 
(org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_valid_write_ids_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_valid_write_ids(args.req,resultHandler); + } + } + } public static class getMetaConf_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { @@ -28995,13 +29523,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); - struct.success = new ArrayList(_list642.size); - String _elem643; - for (int _i644 = 0; _i644 < _list642.size; ++_i644) + org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); + struct.success = new ArrayList(_list650.size); + String _elem651; + for (int _i652 = 0; _i652 < _list650.size; ++_i652) { - _elem643 = iprot.readString(); - struct.success.add(_elem643); + _elem651 = iprot.readString(); + struct.success.add(_elem651); } iprot.readListEnd(); } @@ -29036,9 +29564,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter645 : struct.success) + for (String _iter653 : struct.success) { - oprot.writeString(_iter645); + oprot.writeString(_iter653); } oprot.writeListEnd(); } @@ -29077,9 +29605,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if 
(struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter646 : struct.success) + for (String _iter654 : struct.success) { - oprot.writeString(_iter646); + oprot.writeString(_iter654); } } } @@ -29094,13 +29622,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list647.size); - String _elem648; - for (int _i649 = 0; _i649 < _list647.size; ++_i649) + org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list655.size); + String _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem648 = iprot.readString(); - struct.success.add(_elem648); + _elem656 = iprot.readString(); + struct.success.add(_elem656); } } struct.setSuccessIsSet(true); @@ -29754,13 +30282,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); - struct.success = new ArrayList(_list650.size); - String _elem651; - for (int _i652 = 0; _i652 < _list650.size; ++_i652) + org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); + struct.success = new ArrayList(_list658.size); + String _elem659; + for (int _i660 = 0; _i660 < _list658.size; ++_i660) { - _elem651 = iprot.readString(); - struct.success.add(_elem651); + _elem659 = iprot.readString(); + struct.success.add(_elem659); } iprot.readListEnd(); } @@ -29795,9 +30323,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter653 : struct.success) + for (String _iter661 : struct.success) { - oprot.writeString(_iter653); + oprot.writeString(_iter661); } oprot.writeListEnd(); } @@ -29836,9 +30364,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter654 : struct.success) + for (String _iter662 : struct.success) { - oprot.writeString(_iter654); + oprot.writeString(_iter662); } } } @@ -29853,13 +30381,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list655.size); - String _elem656; - for (int _i657 = 0; _i657 < _list655.size; ++_i657) + org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list663.size); + String _elem664; + for (int _i665 = 0; _i665 < _list663.size; ++_i665) { - _elem656 = iprot.readString(); - struct.success.add(_elem656); + _elem664 = iprot.readString(); + struct.success.add(_elem664); } } struct.setSuccessIsSet(true); @@ -34466,16 +34994,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map658 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map658.size); - String _key659; - Type _val660; - for (int _i661 = 0; _i661 < _map658.size; ++_i661) + org.apache.thrift.protocol.TMap _map666 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map666.size); + String _key667; + Type 
_val668; + for (int _i669 = 0; _i669 < _map666.size; ++_i669) { - _key659 = iprot.readString(); - _val660 = new Type(); - _val660.read(iprot); - struct.success.put(_key659, _val660); + _key667 = iprot.readString(); + _val668 = new Type(); + _val668.read(iprot); + struct.success.put(_key667, _val668); } iprot.readMapEnd(); } @@ -34510,10 +35038,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter662 : struct.success.entrySet()) + for (Map.Entry _iter670 : struct.success.entrySet()) { - oprot.writeString(_iter662.getKey()); - _iter662.getValue().write(oprot); + oprot.writeString(_iter670.getKey()); + _iter670.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -34552,10 +35080,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter663 : struct.success.entrySet()) + for (Map.Entry _iter671 : struct.success.entrySet()) { - oprot.writeString(_iter663.getKey()); - _iter663.getValue().write(oprot); + oprot.writeString(_iter671.getKey()); + _iter671.getValue().write(oprot); } } } @@ -34570,16 +35098,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map664 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map664.size); - String _key665; - Type _val666; - for (int _i667 = 0; _i667 < _map664.size; ++_i667) + org.apache.thrift.protocol.TMap _map672 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map672.size); + String _key673; + Type _val674; + for (int _i675 = 0; _i675 < _map672.size; ++_i675) { - _key665 = iprot.readString(); - _val666 = new Type(); - _val666.read(iprot); - struct.success.put(_key665, _val666); + _key673 = iprot.readString(); + _val674 = new Type(); + _val674.read(iprot); + struct.success.put(_key673, _val674); } } struct.setSuccessIsSet(true); @@ -35614,14 +36142,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list668 = iprot.readListBegin(); - struct.success = new ArrayList(_list668.size); - FieldSchema _elem669; - for (int _i670 = 0; _i670 < _list668.size; ++_i670) + org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); + struct.success = new ArrayList(_list676.size); + FieldSchema _elem677; + for (int _i678 = 0; _i678 < _list676.size; ++_i678) { - _elem669 = new FieldSchema(); - _elem669.read(iprot); - struct.success.add(_elem669); + _elem677 = new FieldSchema(); + _elem677.read(iprot); + struct.success.add(_elem677); } iprot.readListEnd(); } @@ -35674,9 +36202,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter671 : struct.success) + for (FieldSchema _iter679 : struct.success) { - _iter671.write(oprot); + _iter679.write(oprot); } oprot.writeListEnd(); } @@ -35731,9 +36259,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter672 : struct.success) + for (FieldSchema _iter680 : struct.success) { - _iter672.write(oprot); + 
_iter680.write(oprot); } } } @@ -35754,14 +36282,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list673 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list673.size); - FieldSchema _elem674; - for (int _i675 = 0; _i675 < _list673.size; ++_i675) + org.apache.thrift.protocol.TList _list681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list681.size); + FieldSchema _elem682; + for (int _i683 = 0; _i683 < _list681.size; ++_i683) { - _elem674 = new FieldSchema(); - _elem674.read(iprot); - struct.success.add(_elem674); + _elem682 = new FieldSchema(); + _elem682.read(iprot); + struct.success.add(_elem682); } } struct.setSuccessIsSet(true); @@ -36915,14 +37443,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list676 = iprot.readListBegin(); - struct.success = new ArrayList(_list676.size); - FieldSchema _elem677; - for (int _i678 = 0; _i678 < _list676.size; ++_i678) + org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); + struct.success = new ArrayList(_list684.size); + FieldSchema _elem685; + for (int _i686 = 0; _i686 < _list684.size; ++_i686) { - _elem677 = new FieldSchema(); - _elem677.read(iprot); - struct.success.add(_elem677); + _elem685 = new FieldSchema(); + _elem685.read(iprot); + struct.success.add(_elem685); } iprot.readListEnd(); } @@ -36975,9 +37503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.success.size())); - for (FieldSchema _iter679 : struct.success) + for (FieldSchema _iter687 : struct.success) { - _iter679.write(oprot); + _iter687.write(oprot); } oprot.writeListEnd(); } @@ -37032,9 +37560,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter680 : struct.success) + for (FieldSchema _iter688 : struct.success) { - _iter680.write(oprot); + _iter688.write(oprot); } } } @@ -37055,14 +37583,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list681 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list681.size); - FieldSchema _elem682; - for (int _i683 = 0; _i683 < _list681.size; ++_i683) + org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list689.size); + FieldSchema _elem690; + for (int _i691 = 0; _i691 < _list689.size; ++_i691) { - _elem682 = new FieldSchema(); - _elem682.read(iprot); - struct.success.add(_elem682); + _elem690 = new FieldSchema(); + _elem690.read(iprot); + struct.success.add(_elem690); } } struct.setSuccessIsSet(true); @@ -38107,14 +38635,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list684 = iprot.readListBegin(); - struct.success = new ArrayList(_list684.size); - FieldSchema _elem685; - for (int _i686 = 0; _i686 < _list684.size; ++_i686) + org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); + struct.success = new ArrayList(_list692.size); + FieldSchema _elem693; + for (int _i694 = 0; _i694 
< _list692.size; ++_i694) { - _elem685 = new FieldSchema(); - _elem685.read(iprot); - struct.success.add(_elem685); + _elem693 = new FieldSchema(); + _elem693.read(iprot); + struct.success.add(_elem693); } iprot.readListEnd(); } @@ -38167,9 +38695,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter687 : struct.success) + for (FieldSchema _iter695 : struct.success) { - _iter687.write(oprot); + _iter695.write(oprot); } oprot.writeListEnd(); } @@ -38224,9 +38752,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter688 : struct.success) + for (FieldSchema _iter696 : struct.success) { - _iter688.write(oprot); + _iter696.write(oprot); } } } @@ -38247,14 +38775,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list689 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list689.size); - FieldSchema _elem690; - for (int _i691 = 0; _i691 < _list689.size; ++_i691) + org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list697.size); + FieldSchema _elem698; + for (int _i699 = 0; _i699 < _list697.size; ++_i699) { - _elem690 = new FieldSchema(); - _elem690.read(iprot); - struct.success.add(_elem690); + _elem698 = new FieldSchema(); + _elem698.read(iprot); + struct.success.add(_elem698); } } struct.setSuccessIsSet(true); @@ -39408,14 +39936,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list692 = iprot.readListBegin(); - struct.success = new ArrayList(_list692.size); - FieldSchema _elem693; - for (int _i694 = 0; _i694 < _list692.size; ++_i694) + org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); + struct.success = new ArrayList(_list700.size); + FieldSchema _elem701; + for (int _i702 = 0; _i702 < _list700.size; ++_i702) { - _elem693 = new FieldSchema(); - _elem693.read(iprot); - struct.success.add(_elem693); + _elem701 = new FieldSchema(); + _elem701.read(iprot); + struct.success.add(_elem701); } iprot.readListEnd(); } @@ -39468,9 +39996,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter695 : struct.success) + for (FieldSchema _iter703 : struct.success) { - _iter695.write(oprot); + _iter703.write(oprot); } oprot.writeListEnd(); } @@ -39525,9 +40053,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter696 : struct.success) + for (FieldSchema _iter704 : struct.success) { - _iter696.write(oprot); + _iter704.write(oprot); } } } @@ -39548,14 +40076,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list697 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list697.size); - FieldSchema _elem698; - for (int _i699 = 0; _i699 < _list697.size; ++_i699) + org.apache.thrift.protocol.TList _list705 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list705.size); + FieldSchema _elem706; + for (int _i707 = 0; _i707 < _list705.size; ++_i707) { - _elem698 = new FieldSchema(); - _elem698.read(iprot); - struct.success.add(_elem698); + _elem706 = new FieldSchema(); + _elem706.read(iprot); + struct.success.add(_elem706); } } struct.setSuccessIsSet(true); @@ -42280,14 +42808,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list700 = iprot.readListBegin(); - struct.primaryKeys = new ArrayList(_list700.size); - SQLPrimaryKey _elem701; - for (int _i702 = 0; _i702 < _list700.size; ++_i702) + org.apache.thrift.protocol.TList _list708 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list708.size); + SQLPrimaryKey _elem709; + for (int _i710 = 0; _i710 < _list708.size; ++_i710) { - _elem701 = new SQLPrimaryKey(); - _elem701.read(iprot); - struct.primaryKeys.add(_elem701); + _elem709 = new SQLPrimaryKey(); + _elem709.read(iprot); + struct.primaryKeys.add(_elem709); } iprot.readListEnd(); } @@ -42299,14 +42827,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list703 = iprot.readListBegin(); - struct.foreignKeys = new ArrayList(_list703.size); - SQLForeignKey _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list711 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list711.size); + SQLForeignKey _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem704 = new SQLForeignKey(); - _elem704.read(iprot); - struct.foreignKeys.add(_elem704); + _elem712 = new SQLForeignKey(); + 
_elem712.read(iprot); + struct.foreignKeys.add(_elem712); } iprot.readListEnd(); } @@ -42337,9 +42865,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter706 : struct.primaryKeys) + for (SQLPrimaryKey _iter714 : struct.primaryKeys) { - _iter706.write(oprot); + _iter714.write(oprot); } oprot.writeListEnd(); } @@ -42349,9 +42877,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter707 : struct.foreignKeys) + for (SQLForeignKey _iter715 : struct.foreignKeys) { - _iter707.write(oprot); + _iter715.write(oprot); } oprot.writeListEnd(); } @@ -42391,18 +42919,18 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter708 : struct.primaryKeys) + for (SQLPrimaryKey _iter716 : struct.primaryKeys) { - _iter708.write(oprot); + _iter716.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter709 : struct.foreignKeys) + for (SQLForeignKey _iter717 : struct.foreignKeys) { - _iter709.write(oprot); + _iter717.write(oprot); } } } @@ -42419,28 +42947,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list710 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.primaryKeys = new ArrayList(_list710.size); - SQLPrimaryKey _elem711; - for (int _i712 = 0; _i712 < _list710.size; 
++_i712) + org.apache.thrift.protocol.TList _list718 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list718.size); + SQLPrimaryKey _elem719; + for (int _i720 = 0; _i720 < _list718.size; ++_i720) { - _elem711 = new SQLPrimaryKey(); - _elem711.read(iprot); - struct.primaryKeys.add(_elem711); + _elem719 = new SQLPrimaryKey(); + _elem719.read(iprot); + struct.primaryKeys.add(_elem719); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.foreignKeys = new ArrayList(_list713.size); - SQLForeignKey _elem714; - for (int _i715 = 0; _i715 < _list713.size; ++_i715) + org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list721.size); + SQLForeignKey _elem722; + for (int _i723 = 0; _i723 < _list721.size; ++_i723) { - _elem714 = new SQLForeignKey(); - _elem714.read(iprot); - struct.foreignKeys.add(_elem714); + _elem722 = new SQLForeignKey(); + _elem722.read(iprot); + struct.foreignKeys.add(_elem722); } } struct.setForeignKeysIsSet(true); @@ -48639,13 +49167,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list716 = iprot.readListBegin(); - struct.success = new ArrayList(_list716.size); - String _elem717; - for (int _i718 = 0; _i718 < _list716.size; ++_i718) + org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); + struct.success = new ArrayList(_list724.size); + String _elem725; + for (int _i726 = 0; _i726 < _list724.size; ++_i726) { - _elem717 = iprot.readString(); - struct.success.add(_elem717); + _elem725 = iprot.readString(); + 
struct.success.add(_elem725); } iprot.readListEnd(); } @@ -48680,9 +49208,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter719 : struct.success) + for (String _iter727 : struct.success) { - oprot.writeString(_iter719); + oprot.writeString(_iter727); } oprot.writeListEnd(); } @@ -48721,9 +49249,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter720 : struct.success) + for (String _iter728 : struct.success) { - oprot.writeString(_iter720); + oprot.writeString(_iter728); } } } @@ -48738,13 +49266,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list721 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list721.size); - String _elem722; - for (int _i723 = 0; _i723 < _list721.size; ++_i723) + org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list729.size); + String _elem730; + for (int _i731 = 0; _i731 < _list729.size; ++_i731) { - _elem722 = iprot.readString(); - struct.success.add(_elem722); + _elem730 = iprot.readString(); + struct.success.add(_elem730); } } struct.setSuccessIsSet(true); @@ -49718,13 +50246,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list724 = iprot.readListBegin(); - struct.success = new 
ArrayList(_list724.size); - String _elem725; - for (int _i726 = 0; _i726 < _list724.size; ++_i726) + org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); + struct.success = new ArrayList(_list732.size); + String _elem733; + for (int _i734 = 0; _i734 < _list732.size; ++_i734) { - _elem725 = iprot.readString(); - struct.success.add(_elem725); + _elem733 = iprot.readString(); + struct.success.add(_elem733); } iprot.readListEnd(); } @@ -49759,9 +50287,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter727 : struct.success) + for (String _iter735 : struct.success) { - oprot.writeString(_iter727); + oprot.writeString(_iter735); } oprot.writeListEnd(); } @@ -49800,9 +50328,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter728 : struct.success) + for (String _iter736 : struct.success) { - oprot.writeString(_iter728); + oprot.writeString(_iter736); } } } @@ -49817,13 +50345,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list729.size); - String _elem730; - for (int _i731 = 0; _i731 < _list729.size; ++_i731) + org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list737.size); + String _elem738; + for (int _i739 = 0; _i739 < _list737.size; ++_i739) { - _elem730 = iprot.readString(); - struct.success.add(_elem730); + _elem738 = 
iprot.readString(); + struct.success.add(_elem738); } } struct.setSuccessIsSet(true); @@ -50328,13 +50856,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list732 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list732.size); - String _elem733; - for (int _i734 = 0; _i734 < _list732.size; ++_i734) + org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list740.size); + String _elem741; + for (int _i742 = 0; _i742 < _list740.size; ++_i742) { - _elem733 = iprot.readString(); - struct.tbl_types.add(_elem733); + _elem741 = iprot.readString(); + struct.tbl_types.add(_elem741); } iprot.readListEnd(); } @@ -50370,9 +50898,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter735 : struct.tbl_types) + for (String _iter743 : struct.tbl_types) { - oprot.writeString(_iter735); + oprot.writeString(_iter743); } oprot.writeListEnd(); } @@ -50415,9 +50943,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter736 : struct.tbl_types) + for (String _iter744 : struct.tbl_types) { - oprot.writeString(_iter736); + oprot.writeString(_iter744); } } } @@ -50437,13 +50965,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list737 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list737.size); - String _elem738; - for (int _i739 = 0; _i739 < 
_list737.size; ++_i739) + org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list745.size); + String _elem746; + for (int _i747 = 0; _i747 < _list745.size; ++_i747) { - _elem738 = iprot.readString(); - struct.tbl_types.add(_elem738); + _elem746 = iprot.readString(); + struct.tbl_types.add(_elem746); } } struct.setTbl_typesIsSet(true); @@ -50849,14 +51377,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list740 = iprot.readListBegin(); - struct.success = new ArrayList(_list740.size); - TableMeta _elem741; - for (int _i742 = 0; _i742 < _list740.size; ++_i742) + org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); + struct.success = new ArrayList(_list748.size); + TableMeta _elem749; + for (int _i750 = 0; _i750 < _list748.size; ++_i750) { - _elem741 = new TableMeta(); - _elem741.read(iprot); - struct.success.add(_elem741); + _elem749 = new TableMeta(); + _elem749.read(iprot); + struct.success.add(_elem749); } iprot.readListEnd(); } @@ -50891,9 +51419,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter743 : struct.success) + for (TableMeta _iter751 : struct.success) { - _iter743.write(oprot); + _iter751.write(oprot); } oprot.writeListEnd(); } @@ -50932,9 +51460,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter744 : struct.success) + for (TableMeta _iter752 : struct.success) { - _iter744.write(oprot); + _iter752.write(oprot); } 
} } @@ -50949,14 +51477,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list745 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list745.size); - TableMeta _elem746; - for (int _i747 = 0; _i747 < _list745.size; ++_i747) + org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list753.size); + TableMeta _elem754; + for (int _i755 = 0; _i755 < _list753.size; ++_i755) { - _elem746 = new TableMeta(); - _elem746.read(iprot); - struct.success.add(_elem746); + _elem754 = new TableMeta(); + _elem754.read(iprot); + struct.success.add(_elem754); } } struct.setSuccessIsSet(true); @@ -51722,13 +52250,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list748 = iprot.readListBegin(); - struct.success = new ArrayList(_list748.size); - String _elem749; - for (int _i750 = 0; _i750 < _list748.size; ++_i750) + org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); + struct.success = new ArrayList(_list756.size); + String _elem757; + for (int _i758 = 0; _i758 < _list756.size; ++_i758) { - _elem749 = iprot.readString(); - struct.success.add(_elem749); + _elem757 = iprot.readString(); + struct.success.add(_elem757); } iprot.readListEnd(); } @@ -51763,9 +52291,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter751 : struct.success) + for (String _iter759 : 
struct.success) { - oprot.writeString(_iter751); + oprot.writeString(_iter759); } oprot.writeListEnd(); } @@ -51804,9 +52332,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter752 : struct.success) + for (String _iter760 : struct.success) { - oprot.writeString(_iter752); + oprot.writeString(_iter760); } } } @@ -51821,13 +52349,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list753.size); - String _elem754; - for (int _i755 = 0; _i755 < _list753.size; ++_i755) + org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list761.size); + String _elem762; + for (int _i763 = 0; _i763 < _list761.size; ++_i763) { - _elem754 = iprot.readString(); - struct.success.add(_elem754); + _elem762 = iprot.readString(); + struct.success.add(_elem762); } } struct.setSuccessIsSet(true); @@ -53280,13 +53808,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list756 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list756.size); - String _elem757; - for (int _i758 = 0; _i758 < _list756.size; ++_i758) + org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list764.size); + String _elem765; + for (int _i766 = 0; _i766 < _list764.size; ++_i766) { - _elem757 = iprot.readString(); - struct.tbl_names.add(_elem757); + _elem765 = iprot.readString(); + 
struct.tbl_names.add(_elem765); } iprot.readListEnd(); } @@ -53317,9 +53845,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter759 : struct.tbl_names) + for (String _iter767 : struct.tbl_names) { - oprot.writeString(_iter759); + oprot.writeString(_iter767); } oprot.writeListEnd(); } @@ -53356,9 +53884,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter760 : struct.tbl_names) + for (String _iter768 : struct.tbl_names) { - oprot.writeString(_iter760); + oprot.writeString(_iter768); } } } @@ -53374,13 +53902,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list761.size); - String _elem762; - for (int _i763 = 0; _i763 < _list761.size; ++_i763) + org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list769.size); + String _elem770; + for (int _i771 = 0; _i771 < _list769.size; ++_i771) { - _elem762 = iprot.readString(); - struct.tbl_names.add(_elem762); + _elem770 = iprot.readString(); + struct.tbl_names.add(_elem770); } } struct.setTbl_namesIsSet(true); @@ -53948,14 +54476,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list764 = iprot.readListBegin(); - struct.success = new ArrayList

(_list764.size); - Table _elem765; - for (int _i766 = 0; _i766 < _list764.size; ++_i766) + org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); + struct.success = new ArrayList
(_list772.size); + Table _elem773; + for (int _i774 = 0; _i774 < _list772.size; ++_i774) { - _elem765 = new Table(); - _elem765.read(iprot); - struct.success.add(_elem765); + _elem773 = new Table(); + _elem773.read(iprot); + struct.success.add(_elem773); } iprot.readListEnd(); } @@ -54008,9 +54536,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter767 : struct.success) + for (Table _iter775 : struct.success) { - _iter767.write(oprot); + _iter775.write(oprot); } oprot.writeListEnd(); } @@ -54065,9 +54593,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter768 : struct.success) + for (Table _iter776 : struct.success) { - _iter768.write(oprot); + _iter776.write(oprot); } } } @@ -54088,14 +54616,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list769 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
(_list769.size); - Table _elem770; - for (int _i771 = 0; _i771 < _list769.size; ++_i771) + org.apache.thrift.protocol.TList _list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList
(_list777.size); + Table _elem778; + for (int _i779 = 0; _i779 < _list777.size; ++_i779) { - _elem770 = new Table(); - _elem770.read(iprot); - struct.success.add(_elem770); + _elem778 = new Table(); + _elem778.read(iprot); + struct.success.add(_elem778); } } struct.setSuccessIsSet(true); @@ -55241,13 +55769,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list772 = iprot.readListBegin(); - struct.success = new ArrayList(_list772.size); - String _elem773; - for (int _i774 = 0; _i774 < _list772.size; ++_i774) + org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); + struct.success = new ArrayList(_list780.size); + String _elem781; + for (int _i782 = 0; _i782 < _list780.size; ++_i782) { - _elem773 = iprot.readString(); - struct.success.add(_elem773); + _elem781 = iprot.readString(); + struct.success.add(_elem781); } iprot.readListEnd(); } @@ -55300,9 +55828,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter775 : struct.success) + for (String _iter783 : struct.success) { - oprot.writeString(_iter775); + oprot.writeString(_iter783); } oprot.writeListEnd(); } @@ -55357,9 +55885,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter776 : struct.success) + for (String _iter784 : struct.success) { - oprot.writeString(_iter776); + oprot.writeString(_iter784); } } } @@ -55380,13 +55908,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList 
_list777 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list777.size); - String _elem778; - for (int _i779 = 0; _i779 < _list777.size; ++_i779) + org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list785.size); + String _elem786; + for (int _i787 = 0; _i787 < _list785.size; ++_i787) { - _elem778 = iprot.readString(); - struct.success.add(_elem778); + _elem786 = iprot.readString(); + struct.success.add(_elem786); } } struct.setSuccessIsSet(true); @@ -61245,14 +61773,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list780.size); - Partition _elem781; - for (int _i782 = 0; _i782 < _list780.size; ++_i782) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list788.size); + Partition _elem789; + for (int _i790 = 0; _i790 < _list788.size; ++_i790) { - _elem781 = new Partition(); - _elem781.read(iprot); - struct.new_parts.add(_elem781); + _elem789 = new Partition(); + _elem789.read(iprot); + struct.new_parts.add(_elem789); } iprot.readListEnd(); } @@ -61278,9 +61806,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter783 : struct.new_parts) + for (Partition _iter791 : struct.new_parts) { - _iter783.write(oprot); + _iter791.write(oprot); } oprot.writeListEnd(); } @@ -61311,9 +61839,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, 
add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter784 : struct.new_parts) + for (Partition _iter792 : struct.new_parts) { - _iter784.write(oprot); + _iter792.write(oprot); } } } @@ -61325,14 +61853,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list785.size); - Partition _elem786; - for (int _i787 = 0; _i787 < _list785.size; ++_i787) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list793.size); + Partition _elem794; + for (int _i795 = 0; _i795 < _list793.size; ++_i795) { - _elem786 = new Partition(); - _elem786.read(iprot); - struct.new_parts.add(_elem786); + _elem794 = new Partition(); + _elem794.read(iprot); + struct.new_parts.add(_elem794); } } struct.setNew_partsIsSet(true); @@ -62333,14 +62861,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list788.size); - PartitionSpec _elem789; - for (int _i790 = 0; _i790 < _list788.size; ++_i790) + org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list796.size); + PartitionSpec _elem797; + for (int _i798 = 0; _i798 < _list796.size; ++_i798) { - _elem789 = new PartitionSpec(); - _elem789.read(iprot); - struct.new_parts.add(_elem789); + _elem797 = new PartitionSpec(); + _elem797.read(iprot); + struct.new_parts.add(_elem797); } iprot.readListEnd(); } @@ -62366,9 +62894,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter791 : struct.new_parts) + for (PartitionSpec _iter799 : struct.new_parts) { - _iter791.write(oprot); + _iter799.write(oprot); } oprot.writeListEnd(); } @@ -62399,9 +62927,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter792 : struct.new_parts) + for (PartitionSpec _iter800 : struct.new_parts) { - _iter792.write(oprot); + _iter800.write(oprot); } } } @@ -62413,14 +62941,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list793.size); - PartitionSpec _elem794; - for (int _i795 = 0; _i795 < _list793.size; ++_i795) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list801.size); + PartitionSpec _elem802; + for (int _i803 = 0; _i803 < _list801.size; ++_i803) { - _elem794 = new PartitionSpec(); - _elem794.read(iprot); - struct.new_parts.add(_elem794); + _elem802 = new PartitionSpec(); + _elem802.read(iprot); + struct.new_parts.add(_elem802); } } struct.setNew_partsIsSet(true); @@ -63596,13 +64124,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); - struct.part_vals = new 
ArrayList(_list796.size); - String _elem797; - for (int _i798 = 0; _i798 < _list796.size; ++_i798) + org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list804.size); + String _elem805; + for (int _i806 = 0; _i806 < _list804.size; ++_i806) { - _elem797 = iprot.readString(); - struct.part_vals.add(_elem797); + _elem805 = iprot.readString(); + struct.part_vals.add(_elem805); } iprot.readListEnd(); } @@ -63638,9 +64166,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter799 : struct.part_vals) + for (String _iter807 : struct.part_vals) { - oprot.writeString(_iter799); + oprot.writeString(_iter807); } oprot.writeListEnd(); } @@ -63683,9 +64211,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter800 : struct.part_vals) + for (String _iter808 : struct.part_vals) { - oprot.writeString(_iter800); + oprot.writeString(_iter808); } } } @@ -63705,13 +64233,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list801.size); - String _elem802; - for (int _i803 = 0; _i803 < _list801.size; ++_i803) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list809.size); + String _elem810; + for (int _i811 = 0; _i811 < _list809.size; ++_i811) { - _elem802 = iprot.readString(); - struct.part_vals.add(_elem802); + _elem810 = 
iprot.readString(); + struct.part_vals.add(_elem810); } } struct.setPart_valsIsSet(true); @@ -66020,13 +66548,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list804.size); - String _elem805; - for (int _i806 = 0; _i806 < _list804.size; ++_i806) + org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list812.size); + String _elem813; + for (int _i814 = 0; _i814 < _list812.size; ++_i814) { - _elem805 = iprot.readString(); - struct.part_vals.add(_elem805); + _elem813 = iprot.readString(); + struct.part_vals.add(_elem813); } iprot.readListEnd(); } @@ -66071,9 +66599,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter807 : struct.part_vals) + for (String _iter815 : struct.part_vals) { - oprot.writeString(_iter807); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -66124,9 +66652,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter808 : struct.part_vals) + for (String _iter816 : struct.part_vals) { - oprot.writeString(_iter808); + oprot.writeString(_iter816); } } } @@ -66149,13 +66677,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list809.size); - String _elem810; - for (int _i811 = 0; _i811 < 
_list809.size; ++_i811) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list817.size); + String _elem818; + for (int _i819 = 0; _i819 < _list817.size; ++_i819) { - _elem810 = iprot.readString(); - struct.part_vals.add(_elem810); + _elem818 = iprot.readString(); + struct.part_vals.add(_elem818); } } struct.setPart_valsIsSet(true); @@ -70025,13 +70553,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list812.size); - String _elem813; - for (int _i814 = 0; _i814 < _list812.size; ++_i814) + org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list820.size); + String _elem821; + for (int _i822 = 0; _i822 < _list820.size; ++_i822) { - _elem813 = iprot.readString(); - struct.part_vals.add(_elem813); + _elem821 = iprot.readString(); + struct.part_vals.add(_elem821); } iprot.readListEnd(); } @@ -70075,9 +70603,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter815 : struct.part_vals) + for (String _iter823 : struct.part_vals) { - oprot.writeString(_iter815); + oprot.writeString(_iter823); } oprot.writeListEnd(); } @@ -70126,9 +70654,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter816 : struct.part_vals) + for (String _iter824 : struct.part_vals) { - oprot.writeString(_iter816); + oprot.writeString(_iter824); } } } @@ 
-70151,13 +70679,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list817.size); - String _elem818; - for (int _i819 = 0; _i819 < _list817.size; ++_i819) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem818 = iprot.readString(); - struct.part_vals.add(_elem818); + _elem826 = iprot.readString(); + struct.part_vals.add(_elem826); } } struct.setPart_valsIsSet(true); @@ -71396,13 +71924,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list820.size); - String _elem821; - for (int _i822 = 0; _i822 < _list820.size; ++_i822) + org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list828.size); + String _elem829; + for (int _i830 = 0; _i830 < _list828.size; ++_i830) { - _elem821 = iprot.readString(); - struct.part_vals.add(_elem821); + _elem829 = iprot.readString(); + struct.part_vals.add(_elem829); } iprot.readListEnd(); } @@ -71455,9 +71983,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter823 : struct.part_vals) + for (String _iter831 : struct.part_vals) { - oprot.writeString(_iter823); + 
oprot.writeString(_iter831); } oprot.writeListEnd(); } @@ -71514,9 +72042,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter824 : struct.part_vals) + for (String _iter832 : struct.part_vals) { - oprot.writeString(_iter824); + oprot.writeString(_iter832); } } } @@ -71542,13 +72070,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list825.size); - String _elem826; - for (int _i827 = 0; _i827 < _list825.size; ++_i827) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list833.size); + String _elem834; + for (int _i835 = 0; _i835 < _list833.size; ++_i835) { - _elem826 = iprot.readString(); - struct.part_vals.add(_elem826); + _elem834 = iprot.readString(); + struct.part_vals.add(_elem834); } } struct.setPart_valsIsSet(true); @@ -76150,13 +76678,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list828.size); - String _elem829; - for (int _i830 = 0; _i830 < _list828.size; ++_i830) + org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list836.size); + String _elem837; + for (int _i838 = 0; _i838 < _list836.size; ++_i838) { - _elem829 = iprot.readString(); - struct.part_vals.add(_elem829); + _elem837 = iprot.readString(); + struct.part_vals.add(_elem837); } iprot.readListEnd(); } @@ -76192,9 +76720,9 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter831 : struct.part_vals) + for (String _iter839 : struct.part_vals) { - oprot.writeString(_iter831); + oprot.writeString(_iter839); } oprot.writeListEnd(); } @@ -76237,9 +76765,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter832 : struct.part_vals) + for (String _iter840 : struct.part_vals) { - oprot.writeString(_iter832); + oprot.writeString(_iter840); } } } @@ -76259,13 +76787,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list833.size); - String _elem834; - for (int _i835 = 0; _i835 < _list833.size; ++_i835) + org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list841.size); + String _elem842; + for (int _i843 = 0; _i843 < _list841.size; ++_i843) { - _elem834 = iprot.readString(); - struct.part_vals.add(_elem834); + _elem842 = iprot.readString(); + struct.part_vals.add(_elem842); } } struct.setPart_valsIsSet(true); @@ -77483,15 +78011,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map836 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map836.size); - String _key837; - String _val838; - for (int _i839 = 0; _i839 < _map836.size; 
++_i839) + org.apache.thrift.protocol.TMap _map844 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map844.size); + String _key845; + String _val846; + for (int _i847 = 0; _i847 < _map844.size; ++_i847) { - _key837 = iprot.readString(); - _val838 = iprot.readString(); - struct.partitionSpecs.put(_key837, _val838); + _key845 = iprot.readString(); + _val846 = iprot.readString(); + struct.partitionSpecs.put(_key845, _val846); } iprot.readMapEnd(); } @@ -77549,10 +78077,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter840 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter848 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter840.getKey()); - oprot.writeString(_iter840.getValue()); + oprot.writeString(_iter848.getKey()); + oprot.writeString(_iter848.getValue()); } oprot.writeMapEnd(); } @@ -77615,10 +78143,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter841 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter849 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter841.getKey()); - oprot.writeString(_iter841.getValue()); + oprot.writeString(_iter849.getKey()); + oprot.writeString(_iter849.getValue()); } } } @@ -77642,15 +78170,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map842 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new 
HashMap(2*_map842.size); - String _key843; - String _val844; - for (int _i845 = 0; _i845 < _map842.size; ++_i845) + org.apache.thrift.protocol.TMap _map850 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map850.size); + String _key851; + String _val852; + for (int _i853 = 0; _i853 < _map850.size; ++_i853) { - _key843 = iprot.readString(); - _val844 = iprot.readString(); - struct.partitionSpecs.put(_key843, _val844); + _key851 = iprot.readString(); + _val852 = iprot.readString(); + struct.partitionSpecs.put(_key851, _val852); } } struct.setPartitionSpecsIsSet(true); @@ -79096,15 +79624,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map846 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map846.size); - String _key847; - String _val848; - for (int _i849 = 0; _i849 < _map846.size; ++_i849) + org.apache.thrift.protocol.TMap _map854 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map854.size); + String _key855; + String _val856; + for (int _i857 = 0; _i857 < _map854.size; ++_i857) { - _key847 = iprot.readString(); - _val848 = iprot.readString(); - struct.partitionSpecs.put(_key847, _val848); + _key855 = iprot.readString(); + _val856 = iprot.readString(); + struct.partitionSpecs.put(_key855, _val856); } iprot.readMapEnd(); } @@ -79162,10 +79690,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter850 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter858 : 
struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter850.getKey()); - oprot.writeString(_iter850.getValue()); + oprot.writeString(_iter858.getKey()); + oprot.writeString(_iter858.getValue()); } oprot.writeMapEnd(); } @@ -79228,10 +79756,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter851 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter859 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter851.getKey()); - oprot.writeString(_iter851.getValue()); + oprot.writeString(_iter859.getKey()); + oprot.writeString(_iter859.getValue()); } } } @@ -79255,15 +79783,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map852 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map852.size); - String _key853; - String _val854; - for (int _i855 = 0; _i855 < _map852.size; ++_i855) + org.apache.thrift.protocol.TMap _map860 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map860.size); + String _key861; + String _val862; + for (int _i863 = 0; _i863 < _map860.size; ++_i863) { - _key853 = iprot.readString(); - _val854 = iprot.readString(); - struct.partitionSpecs.put(_key853, _val854); + _key861 = iprot.readString(); + _val862 = iprot.readString(); + struct.partitionSpecs.put(_key861, _val862); } } struct.setPartitionSpecsIsSet(true); @@ -79928,14 +80456,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list856 = iprot.readListBegin(); - struct.success = new ArrayList(_list856.size); - Partition _elem857; - for (int _i858 = 0; _i858 < _list856.size; ++_i858) + org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); + struct.success = new ArrayList(_list864.size); + Partition _elem865; + for (int _i866 = 0; _i866 < _list864.size; ++_i866) { - _elem857 = new Partition(); - _elem857.read(iprot); - struct.success.add(_elem857); + _elem865 = new Partition(); + _elem865.read(iprot); + struct.success.add(_elem865); } iprot.readListEnd(); } @@ -79997,9 +80525,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter859 : struct.success) + for (Partition _iter867 : struct.success) { - _iter859.write(oprot); + _iter867.write(oprot); } oprot.writeListEnd(); } @@ -80062,9 +80590,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter860 : struct.success) + for (Partition _iter868 : struct.success) { - _iter860.write(oprot); + _iter868.write(oprot); } } } @@ -80088,14 +80616,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list861.size); - Partition _elem862; - for (int _i863 = 0; _i863 < _list861.size; ++_i863) + org.apache.thrift.protocol.TList _list869 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list869.size); + Partition _elem870; + 
for (int _i871 = 0; _i871 < _list869.size; ++_i871) { - _elem862 = new Partition(); - _elem862.read(iprot); - struct.success.add(_elem862); + _elem870 = new Partition(); + _elem870.read(iprot); + struct.success.add(_elem870); } } struct.setSuccessIsSet(true); @@ -80794,13 +81322,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list864 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list864.size); - String _elem865; - for (int _i866 = 0; _i866 < _list864.size; ++_i866) + org.apache.thrift.protocol.TList _list872 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list872.size); + String _elem873; + for (int _i874 = 0; _i874 < _list872.size; ++_i874) { - _elem865 = iprot.readString(); - struct.part_vals.add(_elem865); + _elem873 = iprot.readString(); + struct.part_vals.add(_elem873); } iprot.readListEnd(); } @@ -80820,13 +81348,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list867 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list867.size); - String _elem868; - for (int _i869 = 0; _i869 < _list867.size; ++_i869) + org.apache.thrift.protocol.TList _list875 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list875.size); + String _elem876; + for (int _i877 = 0; _i877 < _list875.size; ++_i877) { - _elem868 = iprot.readString(); - struct.group_names.add(_elem868); + _elem876 = iprot.readString(); + struct.group_names.add(_elem876); } iprot.readListEnd(); } @@ -80862,9 +81390,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter870 : struct.part_vals) + for (String _iter878 : struct.part_vals) { - oprot.writeString(_iter870); + oprot.writeString(_iter878); } oprot.writeListEnd(); } @@ -80879,9 +81407,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter871 : struct.group_names) + for (String _iter879 : struct.group_names) { - oprot.writeString(_iter871); + oprot.writeString(_iter879); } oprot.writeListEnd(); } @@ -80930,9 +81458,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter872 : struct.part_vals) + for (String _iter880 : struct.part_vals) { - oprot.writeString(_iter872); + oprot.writeString(_iter880); } } } @@ -80942,9 +81470,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter873 : struct.group_names) + for (String _iter881 : struct.group_names) { - oprot.writeString(_iter873); + oprot.writeString(_iter881); } } } @@ -80964,13 +81492,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list874 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list874.size); - String _elem875; - for (int _i876 = 0; _i876 < _list874.size; ++_i876) + org.apache.thrift.protocol.TList _list882 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list882.size); + String _elem883; + for (int _i884 = 0; _i884 < _list882.size; ++_i884) { - _elem875 = iprot.readString(); - struct.part_vals.add(_elem875); + _elem883 = iprot.readString(); + struct.part_vals.add(_elem883); } } struct.setPart_valsIsSet(true); @@ -80981,13 +81509,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list877 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list877.size); - String _elem878; - for (int _i879 = 0; _i879 < _list877.size; ++_i879) + org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list885.size); + String _elem886; + for (int _i887 = 0; _i887 < _list885.size; ++_i887) { - _elem878 = iprot.readString(); - struct.group_names.add(_elem878); + _elem886 = iprot.readString(); + struct.group_names.add(_elem886); } } struct.setGroup_namesIsSet(true); @@ -83756,14 +84284,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list880 = iprot.readListBegin(); - struct.success = new ArrayList(_list880.size); - Partition _elem881; - for (int _i882 = 0; _i882 < _list880.size; ++_i882) + org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); + struct.success = new ArrayList(_list888.size); + Partition _elem889; + for (int _i890 = 0; _i890 < _list888.size; ++_i890) { - _elem881 = new Partition(); - _elem881.read(iprot); - struct.success.add(_elem881); + _elem889 = new Partition(); + _elem889.read(iprot); + struct.success.add(_elem889); } iprot.readListEnd(); } @@ -83807,9 +84335,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter883 : struct.success) + for (Partition _iter891 : struct.success) { - _iter883.write(oprot); + _iter891.write(oprot); } oprot.writeListEnd(); } @@ -83856,9 +84384,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter884 : struct.success) + for (Partition _iter892 : struct.success) { - _iter884.write(oprot); + _iter892.write(oprot); } } } @@ -83876,14 +84404,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list885 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list885.size); - Partition _elem886; - for (int _i887 = 0; _i887 < _list885.size; ++_i887) + org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list893.size); + Partition _elem894; + for (int _i895 = 0; _i895 < _list893.size; ++_i895) { - _elem886 = new Partition(); - _elem886.read(iprot); - struct.success.add(_elem886); + _elem894 = new Partition(); + _elem894.read(iprot); + struct.success.add(_elem894); } } struct.setSuccessIsSet(true); @@ -84573,13 +85101,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list888 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list888.size); - String _elem889; - for (int _i890 = 0; _i890 < _list888.size; ++_i890) + 
org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list896.size); + String _elem897; + for (int _i898 = 0; _i898 < _list896.size; ++_i898) { - _elem889 = iprot.readString(); - struct.group_names.add(_elem889); + _elem897 = iprot.readString(); + struct.group_names.add(_elem897); } iprot.readListEnd(); } @@ -84623,9 +85151,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter891 : struct.group_names) + for (String _iter899 : struct.group_names) { - oprot.writeString(_iter891); + oprot.writeString(_iter899); } oprot.writeListEnd(); } @@ -84680,9 +85208,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter892 : struct.group_names) + for (String _iter900 : struct.group_names) { - oprot.writeString(_iter892); + oprot.writeString(_iter900); } } } @@ -84710,13 +85238,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list893 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list893.size); - String _elem894; - for (int _i895 = 0; _i895 < _list893.size; ++_i895) + org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list901.size); + String _elem902; + for (int _i903 = 0; _i903 < _list901.size; ++_i903) { - _elem894 = iprot.readString(); - struct.group_names.add(_elem894); + _elem902 = iprot.readString(); + struct.group_names.add(_elem902); } } 
struct.setGroup_namesIsSet(true); @@ -85203,14 +85731,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list896 = iprot.readListBegin(); - struct.success = new ArrayList(_list896.size); - Partition _elem897; - for (int _i898 = 0; _i898 < _list896.size; ++_i898) + org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); + struct.success = new ArrayList(_list904.size); + Partition _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem897 = new Partition(); - _elem897.read(iprot); - struct.success.add(_elem897); + _elem905 = new Partition(); + _elem905.read(iprot); + struct.success.add(_elem905); } iprot.readListEnd(); } @@ -85254,9 +85782,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter899 : struct.success) + for (Partition _iter907 : struct.success) { - _iter899.write(oprot); + _iter907.write(oprot); } oprot.writeListEnd(); } @@ -85303,9 +85831,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter900 : struct.success) + for (Partition _iter908 : struct.success) { - _iter900.write(oprot); + _iter908.write(oprot); } } } @@ -85323,14 +85851,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list901 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list901.size); - Partition _elem902; - for (int _i903 = 0; _i903 < 
_list901.size; ++_i903) + org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list909.size); + Partition _elem910; + for (int _i911 = 0; _i911 < _list909.size; ++_i911) { - _elem902 = new Partition(); - _elem902.read(iprot); - struct.success.add(_elem902); + _elem910 = new Partition(); + _elem910.read(iprot); + struct.success.add(_elem910); } } struct.setSuccessIsSet(true); @@ -86393,14 +86921,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list904 = iprot.readListBegin(); - struct.success = new ArrayList(_list904.size); - PartitionSpec _elem905; - for (int _i906 = 0; _i906 < _list904.size; ++_i906) + org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); + struct.success = new ArrayList(_list912.size); + PartitionSpec _elem913; + for (int _i914 = 0; _i914 < _list912.size; ++_i914) { - _elem905 = new PartitionSpec(); - _elem905.read(iprot); - struct.success.add(_elem905); + _elem913 = new PartitionSpec(); + _elem913.read(iprot); + struct.success.add(_elem913); } iprot.readListEnd(); } @@ -86444,9 +86972,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter907 : struct.success) + for (PartitionSpec _iter915 : struct.success) { - _iter907.write(oprot); + _iter915.write(oprot); } oprot.writeListEnd(); } @@ -86493,9 +87021,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter908 : struct.success) + for (PartitionSpec _iter916 : 
struct.success) { - _iter908.write(oprot); + _iter916.write(oprot); } } } @@ -86513,14 +87041,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list909 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list909.size); - PartitionSpec _elem910; - for (int _i911 = 0; _i911 < _list909.size; ++_i911) + org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list917.size); + PartitionSpec _elem918; + for (int _i919 = 0; _i919 < _list917.size; ++_i919) { - _elem910 = new PartitionSpec(); - _elem910.read(iprot); - struct.success.add(_elem910); + _elem918 = new PartitionSpec(); + _elem918.read(iprot); + struct.success.add(_elem918); } } struct.setSuccessIsSet(true); @@ -87499,13 +88027,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list912 = iprot.readListBegin(); - struct.success = new ArrayList(_list912.size); - String _elem913; - for (int _i914 = 0; _i914 < _list912.size; ++_i914) + org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); + struct.success = new ArrayList(_list920.size); + String _elem921; + for (int _i922 = 0; _i922 < _list920.size; ++_i922) { - _elem913 = iprot.readString(); - struct.success.add(_elem913); + _elem921 = iprot.readString(); + struct.success.add(_elem921); } iprot.readListEnd(); } @@ -87540,9 +88068,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size())); - for (String _iter915 : struct.success) + for (String _iter923 : struct.success) { - oprot.writeString(_iter915); + oprot.writeString(_iter923); } oprot.writeListEnd(); } @@ -87581,9 +88109,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter916 : struct.success) + for (String _iter924 : struct.success) { - oprot.writeString(_iter916); + oprot.writeString(_iter924); } } } @@ -87598,13 +88126,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list917 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list917.size); - String _elem918; - for (int _i919 = 0; _i919 < _list917.size; ++_i919) + org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list925.size); + String _elem926; + for (int _i927 = 0; _i927 < _list925.size; ++_i927) { - _elem918 = iprot.readString(); - struct.success.add(_elem918); + _elem926 = iprot.readString(); + struct.success.add(_elem926); } } struct.setSuccessIsSet(true); @@ -88192,13 +88720,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list920 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list920.size); - String _elem921; - for (int _i922 = 0; _i922 < _list920.size; ++_i922) + org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list928.size); + String _elem929; + for (int _i930 = 0; _i930 < _list928.size; ++_i930) { - _elem921 = 
iprot.readString(); - struct.part_vals.add(_elem921); + _elem929 = iprot.readString(); + struct.part_vals.add(_elem929); } iprot.readListEnd(); } @@ -88242,9 +88770,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter923 : struct.part_vals) + for (String _iter931 : struct.part_vals) { - oprot.writeString(_iter923); + oprot.writeString(_iter931); } oprot.writeListEnd(); } @@ -88293,9 +88821,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter924 : struct.part_vals) + for (String _iter932 : struct.part_vals) { - oprot.writeString(_iter924); + oprot.writeString(_iter932); } } } @@ -88318,13 +88846,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list925 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list925.size); - String _elem926; - for (int _i927 = 0; _i927 < _list925.size; ++_i927) + org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list933.size); + String _elem934; + for (int _i935 = 0; _i935 < _list933.size; ++_i935) { - _elem926 = iprot.readString(); - struct.part_vals.add(_elem926); + _elem934 = iprot.readString(); + struct.part_vals.add(_elem934); } } struct.setPart_valsIsSet(true); @@ -88815,14 +89343,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list928 = iprot.readListBegin(); - struct.success = new ArrayList(_list928.size); - Partition _elem929; - for (int _i930 = 0; _i930 < _list928.size; ++_i930) + org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); + struct.success = new ArrayList(_list936.size); + Partition _elem937; + for (int _i938 = 0; _i938 < _list936.size; ++_i938) { - _elem929 = new Partition(); - _elem929.read(iprot); - struct.success.add(_elem929); + _elem937 = new Partition(); + _elem937.read(iprot); + struct.success.add(_elem937); } iprot.readListEnd(); } @@ -88866,9 +89394,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter931 : struct.success) + for (Partition _iter939 : struct.success) { - _iter931.write(oprot); + _iter939.write(oprot); } oprot.writeListEnd(); } @@ -88915,9 +89443,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter932 : struct.success) + for (Partition _iter940 : struct.success) { - _iter932.write(oprot); + _iter940.write(oprot); } } } @@ -88935,14 +89463,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list933 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list933.size); - Partition _elem934; - for (int _i935 = 0; _i935 < _list933.size; ++_i935) + org.apache.thrift.protocol.TList _list941 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list941.size); + Partition _elem942; + 
for (int _i943 = 0; _i943 < _list941.size; ++_i943) { - _elem934 = new Partition(); - _elem934.read(iprot); - struct.success.add(_elem934); + _elem942 = new Partition(); + _elem942.read(iprot); + struct.success.add(_elem942); } } struct.setSuccessIsSet(true); @@ -89714,13 +90242,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list936 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list936.size); - String _elem937; - for (int _i938 = 0; _i938 < _list936.size; ++_i938) + org.apache.thrift.protocol.TList _list944 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list944.size); + String _elem945; + for (int _i946 = 0; _i946 < _list944.size; ++_i946) { - _elem937 = iprot.readString(); - struct.part_vals.add(_elem937); + _elem945 = iprot.readString(); + struct.part_vals.add(_elem945); } iprot.readListEnd(); } @@ -89748,13 +90276,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list939 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list939.size); - String _elem940; - for (int _i941 = 0; _i941 < _list939.size; ++_i941) + org.apache.thrift.protocol.TList _list947 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list947.size); + String _elem948; + for (int _i949 = 0; _i949 < _list947.size; ++_i949) { - _elem940 = iprot.readString(); - struct.group_names.add(_elem940); + _elem948 = iprot.readString(); + struct.group_names.add(_elem948); } iprot.readListEnd(); } @@ -89790,9 +90318,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter942 : struct.part_vals) + for (String _iter950 : struct.part_vals) { - oprot.writeString(_iter942); + oprot.writeString(_iter950); } oprot.writeListEnd(); } @@ -89810,9 +90338,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter943 : struct.group_names) + for (String _iter951 : struct.group_names) { - oprot.writeString(_iter943); + oprot.writeString(_iter951); } oprot.writeListEnd(); } @@ -89864,9 +90392,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter944 : struct.part_vals) + for (String _iter952 : struct.part_vals) { - oprot.writeString(_iter944); + oprot.writeString(_iter952); } } } @@ -89879,9 +90407,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter945 : struct.group_names) + for (String _iter953 : struct.group_names) { - oprot.writeString(_iter945); + oprot.writeString(_iter953); } } } @@ -89901,13 +90429,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list946 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list946.size); - String _elem947; - for (int _i948 = 0; _i948 < _list946.size; ++_i948) + org.apache.thrift.protocol.TList _list954 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new 
ArrayList(_list954.size); + String _elem955; + for (int _i956 = 0; _i956 < _list954.size; ++_i956) { - _elem947 = iprot.readString(); - struct.part_vals.add(_elem947); + _elem955 = iprot.readString(); + struct.part_vals.add(_elem955); } } struct.setPart_valsIsSet(true); @@ -89922,13 +90450,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list949.size); - String _elem950; - for (int _i951 = 0; _i951 < _list949.size; ++_i951) + org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list957.size); + String _elem958; + for (int _i959 = 0; _i959 < _list957.size; ++_i959) { - _elem950 = iprot.readString(); - struct.group_names.add(_elem950); + _elem958 = iprot.readString(); + struct.group_names.add(_elem958); } } struct.setGroup_namesIsSet(true); @@ -90415,14 +90943,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list952 = iprot.readListBegin(); - struct.success = new ArrayList(_list952.size); - Partition _elem953; - for (int _i954 = 0; _i954 < _list952.size; ++_i954) + org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); + struct.success = new ArrayList(_list960.size); + Partition _elem961; + for (int _i962 = 0; _i962 < _list960.size; ++_i962) { - _elem953 = new Partition(); - _elem953.read(iprot); - struct.success.add(_elem953); + _elem961 = new Partition(); + _elem961.read(iprot); + struct.success.add(_elem961); } iprot.readListEnd(); } @@ -90466,9 +90994,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter955 : struct.success) + for (Partition _iter963 : struct.success) { - _iter955.write(oprot); + _iter963.write(oprot); } oprot.writeListEnd(); } @@ -90515,9 +91043,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter956 : struct.success) + for (Partition _iter964 : struct.success) { - _iter956.write(oprot); + _iter964.write(oprot); } } } @@ -90535,14 +91063,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list957.size); - Partition _elem958; - for (int _i959 = 0; _i959 < _list957.size; ++_i959) + org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list965.size); + Partition _elem966; + for (int _i967 = 0; _i967 < _list965.size; ++_i967) { - _elem958 = new Partition(); - _elem958.read(iprot); - struct.success.add(_elem958); + _elem966 = new Partition(); + _elem966.read(iprot); + struct.success.add(_elem966); } } struct.setSuccessIsSet(true); @@ -91135,13 +91663,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list960 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list960.size); - String _elem961; - for (int _i962 = 0; _i962 < _list960.size; ++_i962) + 
org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list968.size); + String _elem969; + for (int _i970 = 0; _i970 < _list968.size; ++_i970) { - _elem961 = iprot.readString(); - struct.part_vals.add(_elem961); + _elem969 = iprot.readString(); + struct.part_vals.add(_elem969); } iprot.readListEnd(); } @@ -91185,9 +91713,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter963 : struct.part_vals) + for (String _iter971 : struct.part_vals) { - oprot.writeString(_iter963); + oprot.writeString(_iter971); } oprot.writeListEnd(); } @@ -91236,9 +91764,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter964 : struct.part_vals) + for (String _iter972 : struct.part_vals) { - oprot.writeString(_iter964); + oprot.writeString(_iter972); } } } @@ -91261,13 +91789,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list965 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list965.size); - String _elem966; - for (int _i967 = 0; _i967 < _list965.size; ++_i967) + org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list973.size); + String _elem974; + for (int _i975 = 0; _i975 < _list973.size; ++_i975) { - _elem966 = iprot.readString(); - struct.part_vals.add(_elem966); + _elem974 = iprot.readString(); + struct.part_vals.add(_elem974); } } struct.setPart_valsIsSet(true); @@ -91755,13 
+92283,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list968 = iprot.readListBegin(); - struct.success = new ArrayList(_list968.size); - String _elem969; - for (int _i970 = 0; _i970 < _list968.size; ++_i970) + org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); + struct.success = new ArrayList(_list976.size); + String _elem977; + for (int _i978 = 0; _i978 < _list976.size; ++_i978) { - _elem969 = iprot.readString(); - struct.success.add(_elem969); + _elem977 = iprot.readString(); + struct.success.add(_elem977); } iprot.readListEnd(); } @@ -91805,9 +92333,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter971 : struct.success) + for (String _iter979 : struct.success) { - oprot.writeString(_iter971); + oprot.writeString(_iter979); } oprot.writeListEnd(); } @@ -91854,9 +92382,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter972 : struct.success) + for (String _iter980 : struct.success) { - oprot.writeString(_iter972); + oprot.writeString(_iter980); } } } @@ -91874,13 +92402,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list973 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list973.size); - String _elem974; - for (int _i975 = 0; _i975 < _list973.size; ++_i975) + org.apache.thrift.protocol.TList _list981 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list981.size); + String _elem982; + for (int _i983 = 0; _i983 < _list981.size; ++_i983) { - _elem974 = iprot.readString(); - struct.success.add(_elem974); + _elem982 = iprot.readString(); + struct.success.add(_elem982); } } struct.setSuccessIsSet(true); @@ -93047,14 +93575,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list976 = iprot.readListBegin(); - struct.success = new ArrayList(_list976.size); - Partition _elem977; - for (int _i978 = 0; _i978 < _list976.size; ++_i978) + org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); + struct.success = new ArrayList(_list984.size); + Partition _elem985; + for (int _i986 = 0; _i986 < _list984.size; ++_i986) { - _elem977 = new Partition(); - _elem977.read(iprot); - struct.success.add(_elem977); + _elem985 = new Partition(); + _elem985.read(iprot); + struct.success.add(_elem985); } iprot.readListEnd(); } @@ -93098,9 +93626,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter979 : struct.success) + for (Partition _iter987 : struct.success) { - _iter979.write(oprot); + _iter987.write(oprot); } oprot.writeListEnd(); } @@ -93147,9 +93675,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter980 : struct.success) + for (Partition _iter988 : struct.success) { - _iter980.write(oprot); + _iter988.write(oprot); } } } @@ -93167,14 +93695,14 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list981 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list981.size); - Partition _elem982; - for (int _i983 = 0; _i983 < _list981.size; ++_i983) + org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list989.size); + Partition _elem990; + for (int _i991 = 0; _i991 < _list989.size; ++_i991) { - _elem982 = new Partition(); - _elem982.read(iprot); - struct.success.add(_elem982); + _elem990 = new Partition(); + _elem990.read(iprot); + struct.success.add(_elem990); } } struct.setSuccessIsSet(true); @@ -94341,14 +94869,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list984 = iprot.readListBegin(); - struct.success = new ArrayList(_list984.size); - PartitionSpec _elem985; - for (int _i986 = 0; _i986 < _list984.size; ++_i986) + org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); + struct.success = new ArrayList(_list992.size); + PartitionSpec _elem993; + for (int _i994 = 0; _i994 < _list992.size; ++_i994) { - _elem985 = new PartitionSpec(); - _elem985.read(iprot); - struct.success.add(_elem985); + _elem993 = new PartitionSpec(); + _elem993.read(iprot); + struct.success.add(_elem993); } iprot.readListEnd(); } @@ -94392,9 +94920,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter987 : struct.success) + for 
(PartitionSpec _iter995 : struct.success) { - _iter987.write(oprot); + _iter995.write(oprot); } oprot.writeListEnd(); } @@ -94441,9 +94969,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter988 : struct.success) + for (PartitionSpec _iter996 : struct.success) { - _iter988.write(oprot); + _iter996.write(oprot); } } } @@ -94461,14 +94989,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list989 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list989.size); - PartitionSpec _elem990; - for (int _i991 = 0; _i991 < _list989.size; ++_i991) + org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list997.size); + PartitionSpec _elem998; + for (int _i999 = 0; _i999 < _list997.size; ++_i999) { - _elem990 = new PartitionSpec(); - _elem990.read(iprot); - struct.success.add(_elem990); + _elem998 = new PartitionSpec(); + _elem998.read(iprot); + struct.success.add(_elem998); } } struct.setSuccessIsSet(true); @@ -97052,13 +97580,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list992 = iprot.readListBegin(); - struct.names = new ArrayList(_list992.size); - String _elem993; - for (int _i994 = 0; _i994 < _list992.size; ++_i994) + org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); + struct.names = new ArrayList(_list1000.size); + String _elem1001; + for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) { - _elem993 = iprot.readString(); - 
struct.names.add(_elem993); + _elem1001 = iprot.readString(); + struct.names.add(_elem1001); } iprot.readListEnd(); } @@ -97094,9 +97622,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter995 : struct.names) + for (String _iter1003 : struct.names) { - oprot.writeString(_iter995); + oprot.writeString(_iter1003); } oprot.writeListEnd(); } @@ -97139,9 +97667,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter996 : struct.names) + for (String _iter1004 : struct.names) { - oprot.writeString(_iter996); + oprot.writeString(_iter1004); } } } @@ -97161,13 +97689,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list997 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list997.size); - String _elem998; - for (int _i999 = 0; _i999 < _list997.size; ++_i999) + org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list1005.size); + String _elem1006; + for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) { - _elem998 = iprot.readString(); - struct.names.add(_elem998); + _elem1006 = iprot.readString(); + struct.names.add(_elem1006); } } struct.setNamesIsSet(true); @@ -97654,14 +98182,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1000 = iprot.readListBegin(); - struct.success = new 
ArrayList(_list1000.size); - Partition _elem1001; - for (int _i1002 = 0; _i1002 < _list1000.size; ++_i1002) + org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); + struct.success = new ArrayList(_list1008.size); + Partition _elem1009; + for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) { - _elem1001 = new Partition(); - _elem1001.read(iprot); - struct.success.add(_elem1001); + _elem1009 = new Partition(); + _elem1009.read(iprot); + struct.success.add(_elem1009); } iprot.readListEnd(); } @@ -97705,9 +98233,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1003 : struct.success) + for (Partition _iter1011 : struct.success) { - _iter1003.write(oprot); + _iter1011.write(oprot); } oprot.writeListEnd(); } @@ -97754,9 +98282,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1004 : struct.success) + for (Partition _iter1012 : struct.success) { - _iter1004.write(oprot); + _iter1012.write(oprot); } } } @@ -97774,14 +98302,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1005 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1005.size); - Partition _elem1006; - for (int _i1007 = 0; _i1007 < _list1005.size; ++_i1007) + org.apache.thrift.protocol.TList _list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1013.size); + Partition _elem1014; + for (int _i1015 = 0; _i1015 < _list1013.size; 
++_i1015) { - _elem1006 = new Partition(); - _elem1006.read(iprot); - struct.success.add(_elem1006); + _elem1014 = new Partition(); + _elem1014.read(iprot); + struct.success.add(_elem1014); } } struct.setSuccessIsSet(true); @@ -99331,14 +99859,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1008.size); - Partition _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1016.size); + Partition _elem1017; + for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) { - _elem1009 = new Partition(); - _elem1009.read(iprot); - struct.new_parts.add(_elem1009); + _elem1017 = new Partition(); + _elem1017.read(iprot); + struct.new_parts.add(_elem1017); } iprot.readListEnd(); } @@ -99374,9 +99902,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1011 : struct.new_parts) + for (Partition _iter1019 : struct.new_parts) { - _iter1011.write(oprot); + _iter1019.write(oprot); } oprot.writeListEnd(); } @@ -99419,9 +99947,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1012 : struct.new_parts) + for (Partition _iter1020 : struct.new_parts) { - _iter1012.write(oprot); + _iter1020.write(oprot); } } } @@ -99441,14 +99969,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1013 = 
new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1013.size); - Partition _elem1014; - for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) + org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1021.size); + Partition _elem1022; + for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) { - _elem1014 = new Partition(); - _elem1014.read(iprot); - struct.new_parts.add(_elem1014); + _elem1022 = new Partition(); + _elem1022.read(iprot); + struct.new_parts.add(_elem1022); } } struct.setNew_partsIsSet(true); @@ -100501,14 +101029,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list1016.size); - Partition _elem1017; - for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) + org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list1024.size); + Partition _elem1025; + for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) { - _elem1017 = new Partition(); - _elem1017.read(iprot); - struct.new_parts.add(_elem1017); + _elem1025 = new Partition(); + _elem1025.read(iprot); + struct.new_parts.add(_elem1025); } iprot.readListEnd(); } @@ -100553,9 +101081,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1019 : struct.new_parts) + for (Partition _iter1027 : struct.new_parts) { - _iter1019.write(oprot); + _iter1027.write(oprot); } oprot.writeListEnd(); } @@ 
-100606,9 +101134,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1020 : struct.new_parts) + for (Partition _iter1028 : struct.new_parts) { - _iter1020.write(oprot); + _iter1028.write(oprot); } } } @@ -100631,14 +101159,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list1021.size); - Partition _elem1022; - for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) + org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list1029.size); + Partition _elem1030; + for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) { - _elem1022 = new Partition(); - _elem1022.read(iprot); - struct.new_parts.add(_elem1022); + _elem1030 = new Partition(); + _elem1030.read(iprot); + struct.new_parts.add(_elem1030); } } struct.setNew_partsIsSet(true); @@ -102839,13 +103367,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1024.size); - String _elem1025; - for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) + org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1032.size); + String _elem1033; + for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) { - _elem1025 = iprot.readString(); - struct.part_vals.add(_elem1025); + _elem1033 = iprot.readString(); + struct.part_vals.add(_elem1033); } iprot.readListEnd(); 
} @@ -102890,9 +103418,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1027 : struct.part_vals) + for (String _iter1035 : struct.part_vals) { - oprot.writeString(_iter1027); + oprot.writeString(_iter1035); } oprot.writeListEnd(); } @@ -102943,9 +103471,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1028 : struct.part_vals) + for (String _iter1036 : struct.part_vals) { - oprot.writeString(_iter1028); + oprot.writeString(_iter1036); } } } @@ -102968,13 +103496,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1029.size); - String _elem1030; - for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) + org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1037.size); + String _elem1038; + for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) { - _elem1030 = iprot.readString(); - struct.part_vals.add(_elem1030); + _elem1038 = iprot.readString(); + struct.part_vals.add(_elem1038); } } struct.setPart_valsIsSet(true); @@ -103848,13 +104376,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1032 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list1032.size); - String _elem1033; 
- for (int _i1034 = 0; _i1034 < _list1032.size; ++_i1034) + org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list1040.size); + String _elem1041; + for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) { - _elem1033 = iprot.readString(); - struct.part_vals.add(_elem1033); + _elem1041 = iprot.readString(); + struct.part_vals.add(_elem1041); } iprot.readListEnd(); } @@ -103888,9 +104416,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter1035 : struct.part_vals) + for (String _iter1043 : struct.part_vals) { - oprot.writeString(_iter1035); + oprot.writeString(_iter1043); } oprot.writeListEnd(); } @@ -103927,9 +104455,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter1036 : struct.part_vals) + for (String _iter1044 : struct.part_vals) { - oprot.writeString(_iter1036); + oprot.writeString(_iter1044); } } } @@ -103944,13 +104472,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1037 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list1037.size); - String _elem1038; - for (int _i1039 = 0; _i1039 < _list1037.size; ++_i1039) + org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list1045.size); + String _elem1046; + for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) { - _elem1038 = iprot.readString(); - 
struct.part_vals.add(_elem1038); + _elem1046 = iprot.readString(); + struct.part_vals.add(_elem1046); } } struct.setPart_valsIsSet(true); @@ -106105,13 +106633,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1040 = iprot.readListBegin(); - struct.success = new ArrayList(_list1040.size); - String _elem1041; - for (int _i1042 = 0; _i1042 < _list1040.size; ++_i1042) + org.apache.thrift.protocol.TList _list1048 = iprot.readListBegin(); + struct.success = new ArrayList(_list1048.size); + String _elem1049; + for (int _i1050 = 0; _i1050 < _list1048.size; ++_i1050) { - _elem1041 = iprot.readString(); - struct.success.add(_elem1041); + _elem1049 = iprot.readString(); + struct.success.add(_elem1049); } iprot.readListEnd(); } @@ -106146,9 +106674,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1043 : struct.success) + for (String _iter1051 : struct.success) { - oprot.writeString(_iter1043); + oprot.writeString(_iter1051); } oprot.writeListEnd(); } @@ -106187,9 +106715,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1044 : struct.success) + for (String _iter1052 : struct.success) { - oprot.writeString(_iter1044); + oprot.writeString(_iter1052); } } } @@ -106204,13 +106732,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1045 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - 
struct.success = new ArrayList(_list1045.size); - String _elem1046; - for (int _i1047 = 0; _i1047 < _list1045.size; ++_i1047) + org.apache.thrift.protocol.TList _list1053 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1053.size); + String _elem1054; + for (int _i1055 = 0; _i1055 < _list1053.size; ++_i1055) { - _elem1046 = iprot.readString(); - struct.success.add(_elem1046); + _elem1054 = iprot.readString(); + struct.success.add(_elem1054); } } struct.setSuccessIsSet(true); @@ -106973,15 +107501,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1048 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map1048.size); - String _key1049; - String _val1050; - for (int _i1051 = 0; _i1051 < _map1048.size; ++_i1051) + org.apache.thrift.protocol.TMap _map1056 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1056.size); + String _key1057; + String _val1058; + for (int _i1059 = 0; _i1059 < _map1056.size; ++_i1059) { - _key1049 = iprot.readString(); - _val1050 = iprot.readString(); - struct.success.put(_key1049, _val1050); + _key1057 = iprot.readString(); + _val1058 = iprot.readString(); + struct.success.put(_key1057, _val1058); } iprot.readMapEnd(); } @@ -107016,10 +107544,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter1052 : struct.success.entrySet()) + for (Map.Entry _iter1060 : struct.success.entrySet()) { - oprot.writeString(_iter1052.getKey()); - oprot.writeString(_iter1052.getValue()); + oprot.writeString(_iter1060.getKey()); + 
oprot.writeString(_iter1060.getValue()); } oprot.writeMapEnd(); } @@ -107058,10 +107586,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter1053 : struct.success.entrySet()) + for (Map.Entry _iter1061 : struct.success.entrySet()) { - oprot.writeString(_iter1053.getKey()); - oprot.writeString(_iter1053.getValue()); + oprot.writeString(_iter1061.getKey()); + oprot.writeString(_iter1061.getValue()); } } } @@ -107076,15 +107604,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1054 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map1054.size); - String _key1055; - String _val1056; - for (int _i1057 = 0; _i1057 < _map1054.size; ++_i1057) + org.apache.thrift.protocol.TMap _map1062 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1062.size); + String _key1063; + String _val1064; + for (int _i1065 = 0; _i1065 < _map1062.size; ++_i1065) { - _key1055 = iprot.readString(); - _val1056 = iprot.readString(); - struct.success.put(_key1055, _val1056); + _key1063 = iprot.readString(); + _val1064 = iprot.readString(); + struct.success.put(_key1063, _val1064); } } struct.setSuccessIsSet(true); @@ -107679,15 +108207,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1058 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1058.size); - String _key1059; - String _val1060; - for (int _i1061 = 0; _i1061 < 
_map1058.size; ++_i1061) + org.apache.thrift.protocol.TMap _map1066 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1066.size); + String _key1067; + String _val1068; + for (int _i1069 = 0; _i1069 < _map1066.size; ++_i1069) { - _key1059 = iprot.readString(); - _val1060 = iprot.readString(); - struct.part_vals.put(_key1059, _val1060); + _key1067 = iprot.readString(); + _val1068 = iprot.readString(); + struct.part_vals.put(_key1067, _val1068); } iprot.readMapEnd(); } @@ -107731,10 +108259,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1062 : struct.part_vals.entrySet()) + for (Map.Entry _iter1070 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1062.getKey()); - oprot.writeString(_iter1062.getValue()); + oprot.writeString(_iter1070.getKey()); + oprot.writeString(_iter1070.getValue()); } oprot.writeMapEnd(); } @@ -107785,10 +108313,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1063 : struct.part_vals.entrySet()) + for (Map.Entry _iter1071 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1063.getKey()); - oprot.writeString(_iter1063.getValue()); + oprot.writeString(_iter1071.getKey()); + oprot.writeString(_iter1071.getValue()); } } } @@ -107811,15 +108339,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1064 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1064.size); - String _key1065; - String _val1066; - 
for (int _i1067 = 0; _i1067 < _map1064.size; ++_i1067) + org.apache.thrift.protocol.TMap _map1072 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1072.size); + String _key1073; + String _val1074; + for (int _i1075 = 0; _i1075 < _map1072.size; ++_i1075) { - _key1065 = iprot.readString(); - _val1066 = iprot.readString(); - struct.part_vals.put(_key1065, _val1066); + _key1073 = iprot.readString(); + _val1074 = iprot.readString(); + struct.part_vals.put(_key1073, _val1074); } } struct.setPart_valsIsSet(true); @@ -109303,15 +109831,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1068 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map1068.size); - String _key1069; - String _val1070; - for (int _i1071 = 0; _i1071 < _map1068.size; ++_i1071) + org.apache.thrift.protocol.TMap _map1076 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1076.size); + String _key1077; + String _val1078; + for (int _i1079 = 0; _i1079 < _map1076.size; ++_i1079) { - _key1069 = iprot.readString(); - _val1070 = iprot.readString(); - struct.part_vals.put(_key1069, _val1070); + _key1077 = iprot.readString(); + _val1078 = iprot.readString(); + struct.part_vals.put(_key1077, _val1078); } iprot.readMapEnd(); } @@ -109355,10 +109883,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter1072 : struct.part_vals.entrySet()) + for (Map.Entry _iter1080 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1072.getKey()); - 
oprot.writeString(_iter1072.getValue()); + oprot.writeString(_iter1080.getKey()); + oprot.writeString(_iter1080.getValue()); } oprot.writeMapEnd(); } @@ -109409,10 +109937,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter1073 : struct.part_vals.entrySet()) + for (Map.Entry _iter1081 : struct.part_vals.entrySet()) { - oprot.writeString(_iter1073.getKey()); - oprot.writeString(_iter1073.getValue()); + oprot.writeString(_iter1081.getKey()); + oprot.writeString(_iter1081.getValue()); } } } @@ -109435,15 +109963,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1074 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1074.size); - String _key1075; - String _val1076; - for (int _i1077 = 0; _i1077 < _map1074.size; ++_i1077) + org.apache.thrift.protocol.TMap _map1082 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1082.size); + String _key1083; + String _val1084; + for (int _i1085 = 0; _i1085 < _map1082.size; ++_i1085) { - _key1075 = iprot.readString(); - _val1076 = iprot.readString(); - struct.part_vals.put(_key1075, _val1076); + _key1083 = iprot.readString(); + _val1084 = iprot.readString(); + struct.part_vals.put(_key1083, _val1084); } } struct.setPart_valsIsSet(true); @@ -116167,14 +116695,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin(); - struct.success = new ArrayList(_list1078.size); - Index 
_elem1079; - for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080) + org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin(); + struct.success = new ArrayList(_list1086.size); + Index _elem1087; + for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) { - _elem1079 = new Index(); - _elem1079.read(iprot); - struct.success.add(_elem1079); + _elem1087 = new Index(); + _elem1087.read(iprot); + struct.success.add(_elem1087); } iprot.readListEnd(); } @@ -116218,9 +116746,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter1081 : struct.success) + for (Index _iter1089 : struct.success) { - _iter1081.write(oprot); + _iter1089.write(oprot); } oprot.writeListEnd(); } @@ -116267,9 +116795,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter1082 : struct.success) + for (Index _iter1090 : struct.success) { - _iter1082.write(oprot); + _iter1090.write(oprot); } } } @@ -116287,14 +116815,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1083 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1083.size); - Index _elem1084; - for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085) + org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1091.size); + Index _elem1092; + for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) { - _elem1084 = new Index(); - _elem1084.read(iprot); - 
struct.success.add(_elem1084); + _elem1092 = new Index(); + _elem1092.read(iprot); + struct.success.add(_elem1092); } } struct.setSuccessIsSet(true); @@ -117273,13 +117801,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin(); - struct.success = new ArrayList(_list1086.size); - String _elem1087; - for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088) + org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin(); + struct.success = new ArrayList(_list1094.size); + String _elem1095; + for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) { - _elem1087 = iprot.readString(); - struct.success.add(_elem1087); + _elem1095 = iprot.readString(); + struct.success.add(_elem1095); } iprot.readListEnd(); } @@ -117314,9 +117842,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1089 : struct.success) + for (String _iter1097 : struct.success) { - oprot.writeString(_iter1089); + oprot.writeString(_iter1097); } oprot.writeListEnd(); } @@ -117355,9 +117883,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1090 : struct.success) + for (String _iter1098 : struct.success) { - oprot.writeString(_iter1090); + oprot.writeString(_iter1098); } } } @@ -117372,13 +117900,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1091 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
iprot.readI32()); - struct.success = new ArrayList(_list1091.size); - String _elem1092; - for (int _i1093 = 0; _i1093 < _list1091.size; ++_i1093) + org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1099.size); + String _elem1100; + for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101) { - _elem1092 = iprot.readString(); - struct.success.add(_elem1092); + _elem1100 = iprot.readString(); + struct.success.add(_elem1100); } } struct.setSuccessIsSet(true); @@ -134989,13 +135517,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1094 = iprot.readListBegin(); - struct.success = new ArrayList(_list1094.size); - String _elem1095; - for (int _i1096 = 0; _i1096 < _list1094.size; ++_i1096) + org.apache.thrift.protocol.TList _list1102 = iprot.readListBegin(); + struct.success = new ArrayList(_list1102.size); + String _elem1103; + for (int _i1104 = 0; _i1104 < _list1102.size; ++_i1104) { - _elem1095 = iprot.readString(); - struct.success.add(_elem1095); + _elem1103 = iprot.readString(); + struct.success.add(_elem1103); } iprot.readListEnd(); } @@ -135030,9 +135558,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1097 : struct.success) + for (String _iter1105 : struct.success) { - oprot.writeString(_iter1097); + oprot.writeString(_iter1105); } oprot.writeListEnd(); } @@ -135071,9 +135599,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1098 : 
struct.success) + for (String _iter1106 : struct.success) { - oprot.writeString(_iter1098); + oprot.writeString(_iter1106); } } } @@ -135088,13 +135616,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1099.size); - String _elem1100; - for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101) + org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1107.size); + String _elem1108; + for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109) { - _elem1100 = iprot.readString(); - struct.success.add(_elem1100); + _elem1108 = iprot.readString(); + struct.success.add(_elem1108); } } struct.setSuccessIsSet(true); @@ -139149,13 +139677,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1102 = iprot.readListBegin(); - struct.success = new ArrayList(_list1102.size); - String _elem1103; - for (int _i1104 = 0; _i1104 < _list1102.size; ++_i1104) + org.apache.thrift.protocol.TList _list1110 = iprot.readListBegin(); + struct.success = new ArrayList(_list1110.size); + String _elem1111; + for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112) { - _elem1103 = iprot.readString(); - struct.success.add(_elem1103); + _elem1111 = iprot.readString(); + struct.success.add(_elem1111); } iprot.readListEnd(); } @@ -139190,9 +139718,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1105 : struct.success) + for (String _iter1113 : struct.success) { - oprot.writeString(_iter1105); + oprot.writeString(_iter1113); } oprot.writeListEnd(); } @@ -139231,9 +139759,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1106 : struct.success) + for (String _iter1114 : struct.success) { - oprot.writeString(_iter1106); + oprot.writeString(_iter1114); } } } @@ -139248,13 +139776,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1107.size); - String _elem1108; - for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109) + org.apache.thrift.protocol.TList _list1115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1115.size); + String _elem1116; + for (int _i1117 = 0; _i1117 < _list1115.size; ++_i1117) { - _elem1108 = iprot.readString(); - struct.success.add(_elem1108); + _elem1116 = iprot.readString(); + struct.success.add(_elem1116); } } struct.setSuccessIsSet(true); @@ -142545,14 +143073,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1110 = iprot.readListBegin(); - struct.success = new ArrayList(_list1110.size); - Role _elem1111; - for (int _i1112 = 0; _i1112 < _list1110.size; ++_i1112) + org.apache.thrift.protocol.TList _list1118 = iprot.readListBegin(); + struct.success = new 
ArrayList(_list1118.size); + Role _elem1119; + for (int _i1120 = 0; _i1120 < _list1118.size; ++_i1120) { - _elem1111 = new Role(); - _elem1111.read(iprot); - struct.success.add(_elem1111); + _elem1119 = new Role(); + _elem1119.read(iprot); + struct.success.add(_elem1119); } iprot.readListEnd(); } @@ -142587,9 +143115,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1113 : struct.success) + for (Role _iter1121 : struct.success) { - _iter1113.write(oprot); + _iter1121.write(oprot); } oprot.writeListEnd(); } @@ -142628,9 +143156,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1114 : struct.success) + for (Role _iter1122 : struct.success) { - _iter1114.write(oprot); + _iter1122.write(oprot); } } } @@ -142645,14 +143173,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1115 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1115.size); - Role _elem1116; - for (int _i1117 = 0; _i1117 < _list1115.size; ++_i1117) + org.apache.thrift.protocol.TList _list1123 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1123.size); + Role _elem1124; + for (int _i1125 = 0; _i1125 < _list1123.size; ++_i1125) { - _elem1116 = new Role(); - _elem1116.read(iprot); - struct.success.add(_elem1116); + _elem1124 = new Role(); + _elem1124.read(iprot); + struct.success.add(_elem1124); } } struct.setSuccessIsSet(true); @@ -145657,13 +146185,13 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1118 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1118.size); - String _elem1119; - for (int _i1120 = 0; _i1120 < _list1118.size; ++_i1120) + org.apache.thrift.protocol.TList _list1126 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1126.size); + String _elem1127; + for (int _i1128 = 0; _i1128 < _list1126.size; ++_i1128) { - _elem1119 = iprot.readString(); - struct.group_names.add(_elem1119); + _elem1127 = iprot.readString(); + struct.group_names.add(_elem1127); } iprot.readListEnd(); } @@ -145699,9 +146227,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1121 : struct.group_names) + for (String _iter1129 : struct.group_names) { - oprot.writeString(_iter1121); + oprot.writeString(_iter1129); } oprot.writeListEnd(); } @@ -145744,9 +146272,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1122 : struct.group_names) + for (String _iter1130 : struct.group_names) { - oprot.writeString(_iter1122); + oprot.writeString(_iter1130); } } } @@ -145767,13 +146295,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1123 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1123.size); - String _elem1124; - for (int _i1125 = 0; _i1125 < _list1123.size; ++_i1125) + org.apache.thrift.protocol.TList 
_list1131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1131.size); + String _elem1132; + for (int _i1133 = 0; _i1133 < _list1131.size; ++_i1133) { - _elem1124 = iprot.readString(); - struct.group_names.add(_elem1124); + _elem1132 = iprot.readString(); + struct.group_names.add(_elem1132); } } struct.setGroup_namesIsSet(true); @@ -147231,14 +147759,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1126 = iprot.readListBegin(); - struct.success = new ArrayList(_list1126.size); - HiveObjectPrivilege _elem1127; - for (int _i1128 = 0; _i1128 < _list1126.size; ++_i1128) + org.apache.thrift.protocol.TList _list1134 = iprot.readListBegin(); + struct.success = new ArrayList(_list1134.size); + HiveObjectPrivilege _elem1135; + for (int _i1136 = 0; _i1136 < _list1134.size; ++_i1136) { - _elem1127 = new HiveObjectPrivilege(); - _elem1127.read(iprot); - struct.success.add(_elem1127); + _elem1135 = new HiveObjectPrivilege(); + _elem1135.read(iprot); + struct.success.add(_elem1135); } iprot.readListEnd(); } @@ -147273,9 +147801,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1129 : struct.success) + for (HiveObjectPrivilege _iter1137 : struct.success) { - _iter1129.write(oprot); + _iter1137.write(oprot); } oprot.writeListEnd(); } @@ -147314,9 +147842,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1130 : struct.success) + for (HiveObjectPrivilege _iter1138 
: struct.success) { - _iter1130.write(oprot); + _iter1138.write(oprot); } } } @@ -147331,14 +147859,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1131 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1131.size); - HiveObjectPrivilege _elem1132; - for (int _i1133 = 0; _i1133 < _list1131.size; ++_i1133) + org.apache.thrift.protocol.TList _list1139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1139.size); + HiveObjectPrivilege _elem1140; + for (int _i1141 = 0; _i1141 < _list1139.size; ++_i1141) { - _elem1132 = new HiveObjectPrivilege(); - _elem1132.read(iprot); - struct.success.add(_elem1132); + _elem1140 = new HiveObjectPrivilege(); + _elem1140.read(iprot); + struct.success.add(_elem1140); } } struct.setSuccessIsSet(true); @@ -150240,13 +150768,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1134 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1134.size); - String _elem1135; - for (int _i1136 = 0; _i1136 < _list1134.size; ++_i1136) + org.apache.thrift.protocol.TList _list1142 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1142.size); + String _elem1143; + for (int _i1144 = 0; _i1144 < _list1142.size; ++_i1144) { - _elem1135 = iprot.readString(); - struct.group_names.add(_elem1135); + _elem1143 = iprot.readString(); + struct.group_names.add(_elem1143); } iprot.readListEnd(); } @@ -150277,9 +150805,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { 
oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1137 : struct.group_names) + for (String _iter1145 : struct.group_names) { - oprot.writeString(_iter1137); + oprot.writeString(_iter1145); } oprot.writeListEnd(); } @@ -150316,9 +150844,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1138 : struct.group_names) + for (String _iter1146 : struct.group_names) { - oprot.writeString(_iter1138); + oprot.writeString(_iter1146); } } } @@ -150334,13 +150862,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1139 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1139.size); - String _elem1140; - for (int _i1141 = 0; _i1141 < _list1139.size; ++_i1141) + org.apache.thrift.protocol.TList _list1147 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1147.size); + String _elem1148; + for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149) { - _elem1140 = iprot.readString(); - struct.group_names.add(_elem1140); + _elem1148 = iprot.readString(); + struct.group_names.add(_elem1148); } } struct.setGroup_namesIsSet(true); @@ -150743,13 +151271,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1142 = iprot.readListBegin(); - struct.success = new ArrayList(_list1142.size); - String _elem1143; - for (int _i1144 = 0; _i1144 < _list1142.size; ++_i1144) + org.apache.thrift.protocol.TList _list1150 = iprot.readListBegin(); + 
struct.success = new ArrayList(_list1150.size); + String _elem1151; + for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152) { - _elem1143 = iprot.readString(); - struct.success.add(_elem1143); + _elem1151 = iprot.readString(); + struct.success.add(_elem1151); } iprot.readListEnd(); } @@ -150784,9 +151312,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1145 : struct.success) + for (String _iter1153 : struct.success) { - oprot.writeString(_iter1145); + oprot.writeString(_iter1153); } oprot.writeListEnd(); } @@ -150825,9 +151353,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1146 : struct.success) + for (String _iter1154 : struct.success) { - oprot.writeString(_iter1146); + oprot.writeString(_iter1154); } } } @@ -150842,13 +151370,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1147 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1147.size); - String _elem1148; - for (int _i1149 = 0; _i1149 < _list1147.size; ++_i1149) + org.apache.thrift.protocol.TList _list1155 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1155.size); + String _elem1156; + for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) { - _elem1148 = iprot.readString(); - struct.success.add(_elem1148); + _elem1156 = iprot.readString(); + struct.success.add(_elem1156); } } struct.setSuccessIsSet(true); @@ -156139,13 +156667,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1150 = iprot.readListBegin(); - struct.success = new ArrayList(_list1150.size); - String _elem1151; - for (int _i1152 = 0; _i1152 < _list1150.size; ++_i1152) + org.apache.thrift.protocol.TList _list1158 = iprot.readListBegin(); + struct.success = new ArrayList(_list1158.size); + String _elem1159; + for (int _i1160 = 0; _i1160 < _list1158.size; ++_i1160) { - _elem1151 = iprot.readString(); - struct.success.add(_elem1151); + _elem1159 = iprot.readString(); + struct.success.add(_elem1159); } iprot.readListEnd(); } @@ -156171,9 +156699,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1153 : struct.success) + for (String _iter1161 : struct.success) { - oprot.writeString(_iter1153); + oprot.writeString(_iter1161); } oprot.writeListEnd(); } @@ -156204,9 +156732,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1154 : struct.success) + for (String _iter1162 : struct.success) { - oprot.writeString(_iter1154); + oprot.writeString(_iter1162); } } } @@ -156218,13 +156746,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1155 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1155.size); - String _elem1156; - for (int _i1157 = 0; _i1157 < _list1155.size; ++_i1157) + org.apache.thrift.protocol.TList _list1163 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1163.size); + String _elem1164; + for (int _i1165 = 0; _i1165 < _list1163.size; ++_i1165) { - _elem1156 = iprot.readString(); - struct.success.add(_elem1156); + _elem1164 = iprot.readString(); + struct.success.add(_elem1164); } } struct.setSuccessIsSet(true); @@ -159254,13 +159782,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1158 = iprot.readListBegin(); - struct.success = new ArrayList(_list1158.size); - String _elem1159; - for (int _i1160 = 0; _i1160 < _list1158.size; ++_i1160) + org.apache.thrift.protocol.TList _list1166 = iprot.readListBegin(); + struct.success = new ArrayList(_list1166.size); + String _elem1167; + for (int _i1168 = 0; _i1168 < _list1166.size; ++_i1168) { - _elem1159 = iprot.readString(); - struct.success.add(_elem1159); + _elem1167 = iprot.readString(); + struct.success.add(_elem1167); } iprot.readListEnd(); } @@ -159286,9 +159814,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1161 : struct.success) + for (String _iter1169 : struct.success) { - oprot.writeString(_iter1161); + oprot.writeString(_iter1169); } oprot.writeListEnd(); } @@ -159319,9 +159847,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1162 : struct.success) + for (String _iter1170 : struct.success) { - oprot.writeString(_iter1162); + oprot.writeString(_iter1170); } } } @@ -159333,13 +159861,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1163 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1163.size); - String _elem1164; - for (int _i1165 = 0; _i1165 < _list1163.size; ++_i1165) + org.apache.thrift.protocol.TList _list1171 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1171.size); + String _elem1172; + for (int _i1173 = 0; _i1173 < _list1171.size; ++_i1173) { - _elem1164 = iprot.readString(); - struct.success.add(_elem1164); + _elem1172 = iprot.readString(); + struct.success.add(_elem1172); } } struct.setSuccessIsSet(true); @@ -165988,128 +166516,3244 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(unlock_result.class, metaDataMap); + } + + public unlock_result() { + } + + public unlock_result( + NoSuchLockException o1, + TxnOpenException o2) + { + this(); + this.o1 = o1; + this.o2 = o2; + } + + 
/** + * Performs a deep copy on other. + */ + public unlock_result(unlock_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchLockException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new TxnOpenException(other.o2); + } + } + + public unlock_result deepCopy() { + return new unlock_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + } + + public NoSuchLockException getO1() { + return this.o1; + } + + public void setO1(NoSuchLockException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public TxnOpenException getO2() { + return this.o2; + } + + public void setO2(TxnOpenException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchLockException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((TxnOpenException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return 
isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof unlock_result) + return this.equals((unlock_result)that); + return false; + } + + public boolean equals(unlock_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(unlock_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + 
public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("unlock_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class unlock_resultStandardSchemeFactory implements SchemeFactory { + public unlock_resultStandardScheme getScheme() { + return new unlock_resultStandardScheme(); + } + } + + private static class unlock_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, unlock_result struct) throws 
org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchLockException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new TxnOpenException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, unlock_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class unlock_resultTupleSchemeFactory implements SchemeFactory { + public unlock_resultTupleScheme getScheme() { + return new unlock_resultTupleScheme(); + } + } + + private static class unlock_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, unlock_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + 
optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, unlock_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new NoSuchLockException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new TxnOpenException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class show_locks_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_locks_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_locks_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_locks_argsTupleSchemeFactory()); + } + + private ShowLocksRequest rqst; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowLocksRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_locks_args.class, metaDataMap); + } + + public show_locks_args() { + } + + public show_locks_args( + ShowLocksRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. 
+ */ + public show_locks_args(show_locks_args other) { + if (other.isSetRqst()) { + this.rqst = new ShowLocksRequest(other.rqst); + } + } + + public show_locks_args deepCopy() { + return new show_locks_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public ShowLocksRequest getRqst() { + return this.rqst; + } + + public void setRqst(ShowLocksRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((ShowLocksRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_locks_args) + return this.equals((show_locks_args)that); + return false; + } + + public boolean equals(show_locks_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int 
hashCode() { + List list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(show_locks_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_locks_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) 
throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_locks_argsStandardSchemeFactory implements SchemeFactory { + public show_locks_argsStandardScheme getScheme() { + return new show_locks_argsStandardScheme(); + } + } + + private static class show_locks_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new ShowLocksRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class show_locks_argsTupleSchemeFactory implements SchemeFactory { + public show_locks_argsTupleScheme getScheme() { + return new show_locks_argsTupleScheme(); + } + } + + private static class show_locks_argsTupleScheme extends TupleScheme { 
+ + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new ShowLocksRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class show_locks_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_locks_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new show_locks_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_locks_resultTupleSchemeFactory()); + } + + private ShowLocksResponse success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowLocksResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_locks_result.class, metaDataMap); + } + + public show_locks_result() { + } + + public show_locks_result( + ShowLocksResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public show_locks_result(show_locks_result other) { + if (other.isSetSuccess()) { + this.success = new ShowLocksResponse(other.success); + } + } + + public show_locks_result deepCopy() { + return new show_locks_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public ShowLocksResponse getSuccess() { + return this.success; + } + + public void setSuccess(ShowLocksResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((ShowLocksResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof show_locks_result) + return this.equals((show_locks_result)that); + return false; + } + + public boolean equals(show_locks_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return 
false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(show_locks_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("show_locks_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new 
org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class show_locks_resultStandardSchemeFactory implements SchemeFactory { + public show_locks_resultStandardScheme getScheme() { + return new show_locks_resultStandardScheme(); + } + } + + private static class show_locks_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new ShowLocksResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static 
class show_locks_resultTupleSchemeFactory implements SchemeFactory { + public show_locks_resultTupleScheme getScheme() { + return new show_locks_resultTupleScheme(); + } + } + + private static class show_locks_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new ShowLocksResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class heartbeat_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_args"); + + private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new heartbeat_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_argsTupleSchemeFactory()); + } + + private HeartbeatRequest ids; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + IDS((short)1, "ids"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // IDS + return IDS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_args.class, metaDataMap); + } + + public 
heartbeat_args() { + } + + public heartbeat_args( + HeartbeatRequest ids) + { + this(); + this.ids = ids; + } + + /** + * Performs a deep copy on other. + */ + public heartbeat_args(heartbeat_args other) { + if (other.isSetIds()) { + this.ids = new HeartbeatRequest(other.ids); + } + } + + public heartbeat_args deepCopy() { + return new heartbeat_args(this); + } + + @Override + public void clear() { + this.ids = null; + } + + public HeartbeatRequest getIds() { + return this.ids; + } + + public void setIds(HeartbeatRequest ids) { + this.ids = ids; + } + + public void unsetIds() { + this.ids = null; + } + + /** Returns true if field ids is set (has been assigned a value) and false otherwise */ + public boolean isSetIds() { + return this.ids != null; + } + + public void setIdsIsSet(boolean value) { + if (!value) { + this.ids = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case IDS: + if (value == null) { + unsetIds(); + } else { + setIds((HeartbeatRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case IDS: + return getIds(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case IDS: + return isSetIds(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof heartbeat_args) + return this.equals((heartbeat_args)that); + return false; + } + + public boolean equals(heartbeat_args that) { + if (that == null) + return false; + + boolean this_present_ids = true && this.isSetIds(); + boolean that_present_ids = true && that.isSetIds(); + if (this_present_ids || that_present_ids) { + if (!(this_present_ids && that_present_ids)) + return 
false; + if (!this.ids.equals(that.ids)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_ids = true && (isSetIds()); + list.add(present_ids); + if (present_ids) + list.add(ids); + + return list.hashCode(); + } + + @Override + public int compareTo(heartbeat_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIds()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("heartbeat_args("); + boolean first = true; + + sb.append("ids:"); + if (this.ids == null) { + sb.append("null"); + } else { + sb.append(this.ids); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (ids != null) { + ids.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new 
java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class heartbeat_argsStandardSchemeFactory implements SchemeFactory { + public heartbeat_argsStandardScheme getScheme() { + return new heartbeat_argsStandardScheme(); + } + } + + private static class heartbeat_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // IDS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ids = new HeartbeatRequest(); + struct.ids.read(iprot); + struct.setIdsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.ids != null) { + oprot.writeFieldBegin(IDS_FIELD_DESC); + struct.ids.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class heartbeat_argsTupleSchemeFactory implements SchemeFactory { + public heartbeat_argsTupleScheme getScheme() { + return new heartbeat_argsTupleScheme(); + } + 
} + + private static class heartbeat_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetIds()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetIds()) { + struct.ids.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.ids = new HeartbeatRequest(); + struct.ids.read(iprot); + struct.setIdsIsSet(true); + } + } + } + + } + + public static class heartbeat_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new heartbeat_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_resultTupleSchemeFactory()); + } + + private NoSuchLockException o1; // required + private NoSuchTxnException o2; // required + private TxnAbortedException o3; // required + + /** The set of fields this 
struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"), + O2((short)2, "o2"), + O3((short)3, "o3"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + case 2: // O2 + return O2; + case 3: // O3 + return O3; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_result.class, metaDataMap); + } + + public heartbeat_result() { + } + + public heartbeat_result( + NoSuchLockException o1, + NoSuchTxnException o2, + TxnAbortedException o3) + { + this(); + this.o1 = o1; + this.o2 = o2; + this.o3 = o3; + } + + /** + * Performs a deep copy on other. 
+ */ + public heartbeat_result(heartbeat_result other) { + if (other.isSetO1()) { + this.o1 = new NoSuchLockException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchTxnException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new TxnAbortedException(other.o3); + } + } + + public heartbeat_result deepCopy() { + return new heartbeat_result(this); + } + + @Override + public void clear() { + this.o1 = null; + this.o2 = null; + this.o3 = null; + } + + public NoSuchLockException getO1() { + return this.o1; + } + + public void setO1(NoSuchLockException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchTxnException getO2() { + return this.o2; + } + + public void setO2(NoSuchTxnException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public TxnAbortedException getO3() { + return this.o3; + } + + public void setO3(TxnAbortedException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchLockException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchTxnException)value); 
+ } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((TxnAbortedException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + case O2: + return getO2(); + + case O3: + return getO3(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + case O2: + return isSetO2(); + case O3: + return isSetO3(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof heartbeat_result) + return this.equals((heartbeat_result)that); + return false; + } + + public boolean equals(heartbeat_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean 
present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + boolean present_o3 = true && (isSetO3()); + list.add(present_o3); + if (present_o3) + list.add(o3); + + return list.hashCode(); + } + + @Override + public int compareTo(heartbeat_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("heartbeat_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } 
+ first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class heartbeat_resultStandardSchemeFactory implements SchemeFactory { + public heartbeat_resultStandardScheme getScheme() { + return new heartbeat_resultStandardScheme(); + } + } + + private static class heartbeat_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchLockException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new NoSuchTxnException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new TxnAbortedException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class heartbeat_resultTupleSchemeFactory implements SchemeFactory { + public heartbeat_resultTupleScheme getScheme() { + return new heartbeat_resultTupleScheme(); + } + } + + private static class heartbeat_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + 
optionals.set(1); + } + if (struct.isSetO3()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.o1 = new NoSuchLockException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new NoSuchTxnException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(2)) { + struct.o3 = new TxnAbortedException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + } + } + + } + + public static class heartbeat_txn_range_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_txn_range_args"); + + private static final org.apache.thrift.protocol.TField TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("txns", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new heartbeat_txn_range_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_txn_range_argsTupleSchemeFactory()); + } + + private HeartbeatTxnRangeRequest txns; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TXNS((short)1, "txns"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TXNS + return TXNS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TXNS, new org.apache.thrift.meta_data.FieldMetaData("txns", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatTxnRangeRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_txn_range_args.class, metaDataMap); + 
} + + public heartbeat_txn_range_args() { + } + + public heartbeat_txn_range_args( + HeartbeatTxnRangeRequest txns) + { + this(); + this.txns = txns; + } + + /** + * Performs a deep copy on other. + */ + public heartbeat_txn_range_args(heartbeat_txn_range_args other) { + if (other.isSetTxns()) { + this.txns = new HeartbeatTxnRangeRequest(other.txns); + } + } + + public heartbeat_txn_range_args deepCopy() { + return new heartbeat_txn_range_args(this); + } + + @Override + public void clear() { + this.txns = null; + } + + public HeartbeatTxnRangeRequest getTxns() { + return this.txns; + } + + public void setTxns(HeartbeatTxnRangeRequest txns) { + this.txns = txns; + } + + public void unsetTxns() { + this.txns = null; + } + + /** Returns true if field txns is set (has been assigned a value) and false otherwise */ + public boolean isSetTxns() { + return this.txns != null; + } + + public void setTxnsIsSet(boolean value) { + if (!value) { + this.txns = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TXNS: + if (value == null) { + unsetTxns(); + } else { + setTxns((HeartbeatTxnRangeRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TXNS: + return getTxns(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TXNS: + return isSetTxns(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof heartbeat_txn_range_args) + return this.equals((heartbeat_txn_range_args)that); + return false; + } + + public boolean equals(heartbeat_txn_range_args that) { + if (that == null) + return false; + + boolean this_present_txns = true && 
this.isSetTxns(); + boolean that_present_txns = true && that.isSetTxns(); + if (this_present_txns || that_present_txns) { + if (!(this_present_txns && that_present_txns)) + return false; + if (!this.txns.equals(that.txns)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_txns = true && (isSetTxns()); + list.add(present_txns); + if (present_txns) + list.add(txns); + + return list.hashCode(); + } + + @Override + public int compareTo(heartbeat_txn_range_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTxns()).compareTo(other.isSetTxns()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTxns()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txns, other.txns); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("heartbeat_txn_range_args("); + boolean first = true; + + sb.append("txns:"); + if (this.txns == null) { + sb.append("null"); + } else { + sb.append(this.txns); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (txns != null) { + txns.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream 
out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class heartbeat_txn_range_argsStandardSchemeFactory implements SchemeFactory { + public heartbeat_txn_range_argsStandardScheme getScheme() { + return new heartbeat_txn_range_argsStandardScheme(); + } + } + + private static class heartbeat_txn_range_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TXNS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.txns = new HeartbeatTxnRangeRequest(); + struct.txns.read(iprot); + struct.setTxnsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.txns != null) { + oprot.writeFieldBegin(TXNS_FIELD_DESC); + 
struct.txns.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class heartbeat_txn_range_argsTupleSchemeFactory implements SchemeFactory { + public heartbeat_txn_range_argsTupleScheme getScheme() { + return new heartbeat_txn_range_argsTupleScheme(); + } + } + + private static class heartbeat_txn_range_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetTxns()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetTxns()) { + struct.txns.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.txns = new HeartbeatTxnRangeRequest(); + struct.txns.read(iprot); + struct.setTxnsIsSet(true); + } + } + } + + } + + public static class heartbeat_txn_range_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_txn_range_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new heartbeat_txn_range_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_txn_range_resultTupleSchemeFactory()); + } + + private HeartbeatTxnRangeResponse success; // required + + /** The set of fields 
this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatTxnRangeResponse.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_txn_range_result.class, metaDataMap); + } + + public heartbeat_txn_range_result() { + } + + public heartbeat_txn_range_result( + HeartbeatTxnRangeResponse success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public heartbeat_txn_range_result(heartbeat_txn_range_result other) { + if (other.isSetSuccess()) { + this.success = new HeartbeatTxnRangeResponse(other.success); + } + } + + public heartbeat_txn_range_result deepCopy() { + return new heartbeat_txn_range_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public HeartbeatTxnRangeResponse getSuccess() { + return this.success; + } + + public void setSuccess(HeartbeatTxnRangeResponse success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((HeartbeatTxnRangeResponse)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof heartbeat_txn_range_result) + return this.equals((heartbeat_txn_range_result)that); + return false; + } + + public boolean equals(heartbeat_txn_range_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if 
(this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(heartbeat_txn_range_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("heartbeat_txn_range_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws 
java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class heartbeat_txn_range_resultStandardSchemeFactory implements SchemeFactory { + public heartbeat_txn_range_resultStandardScheme getScheme() { + return new heartbeat_txn_range_resultStandardScheme(); + } + } + + private static class heartbeat_txn_range_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new HeartbeatTxnRangeResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class heartbeat_txn_range_resultTupleSchemeFactory implements SchemeFactory { + public heartbeat_txn_range_resultTupleScheme getScheme() { + return new heartbeat_txn_range_resultTupleScheme(); + } + } + + private static class heartbeat_txn_range_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new HeartbeatTxnRangeResponse(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_args"); + + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new compact_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new compact_argsTupleSchemeFactory()); + } + + private CompactionRequest rqst; // required + + /** The set of 
fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + RQST((short)1, "rqst"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // RQST + return RQST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CompactionRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class, metaDataMap); + } + + public compact_args() { + } + + public compact_args( + CompactionRequest rqst) + { + this(); + this.rqst = rqst; + } + + /** + * Performs a deep copy on other. 
+ */ + public compact_args(compact_args other) { + if (other.isSetRqst()) { + this.rqst = new CompactionRequest(other.rqst); + } + } + + public compact_args deepCopy() { + return new compact_args(this); + } + + @Override + public void clear() { + this.rqst = null; + } + + public CompactionRequest getRqst() { + return this.rqst; + } + + public void setRqst(CompactionRequest rqst) { + this.rqst = rqst; + } + + public void unsetRqst() { + this.rqst = null; + } + + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; + } + + public void setRqstIsSet(boolean value) { + if (!value) { + this.rqst = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case RQST: + if (value == null) { + unsetRqst(); + } else { + setRqst((CompactionRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case RQST: + return getRqst(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case RQST: + return isSetRqst(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof compact_args) + return this.equals((compact_args)that); + return false; + } + + public boolean equals(compact_args that) { + if (that == null) + return false; + + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) + return false; + if (!this.rqst.equals(that.rqst)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List 
list = new ArrayList(); + + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); + + return list.hashCode(); + } + + @Override + public int compareTo(compact_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("compact_args("); + boolean first = true; + + sb.append("rqst:"); + if (this.rqst == null) { + sb.append("null"); + } else { + sb.append(this.rqst); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (rqst != null) { + rqst.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, 
ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class compact_argsStandardSchemeFactory implements SchemeFactory { + public compact_argsStandardScheme getScheme() { + return new compact_argsStandardScheme(); + } + } + + private static class compact_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // RQST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class compact_argsTupleSchemeFactory implements SchemeFactory { + public compact_argsTupleScheme getScheme() { + return new compact_argsTupleScheme(); + } + } + + private static class compact_argsTupleScheme extends TupleScheme { + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRqst()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.rqst = new CompactionRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); + } + } + } + + } + + public static class compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(unlock_result.class, metaDataMap); - } - - public unlock_result() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap); } - public unlock_result( - NoSuchLockException o1, - TxnOpenException o2) - { - this(); - this.o1 = o1; - this.o2 = o2; + public compact_result() { } /** * Performs a deep copy on other. 
*/ - public unlock_result(unlock_result other) { - if (other.isSetO1()) { - this.o1 = new NoSuchLockException(other.o1); - } - if (other.isSetO2()) { - this.o2 = new TxnOpenException(other.o2); - } + public compact_result(compact_result other) { } - public unlock_result deepCopy() { - return new unlock_result(this); + public compact_result deepCopy() { + return new compact_result(this); } @Override public void clear() { - this.o1 = null; - this.o2 = null; - } - - public NoSuchLockException getO1() { - return this.o1; - } - - public void setO1(NoSuchLockException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ - public boolean isSetO1() { - return this.o1 != null; - } - - public void setO1IsSet(boolean value) { - if (!value) { - this.o1 = null; - } - } - - public TxnOpenException getO2() { - return this.o2; - } - - public void setO2(TxnOpenException o2) { - this.o2 = o2; - } - - public void unsetO2() { - this.o2 = null; - } - - /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ - public boolean isSetO2() { - return this.o2 != null; - } - - public void setO2IsSet(boolean value) { - if (!value) { - this.o2 = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((NoSuchLockException)value); - } - break; - - case O2: - if (value == null) { - unsetO2(); - } else { - setO2((TxnOpenException)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case O1: - return getO1(); - - case O2: - return getO2(); - } throw new IllegalStateException(); } @@ -166121,10 +169765,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case O1: - return isSetO1(); - case O2: - return isSetO2(); } throw new IllegalStateException(); } @@ -166133,33 +169773,15 @@ public boolean isSet(_Fields field) { 
public boolean equals(Object that) { if (that == null) return false; - if (that instanceof unlock_result) - return this.equals((unlock_result)that); + if (that instanceof compact_result) + return this.equals((compact_result)that); return false; } - public boolean equals(unlock_result that) { + public boolean equals(compact_result that) { if (that == null) return false; - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if (!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) - return false; - if (!this.o2.equals(that.o2)) - return false; - } - return true; } @@ -166167,47 +169789,17 @@ public boolean equals(unlock_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); - return list.hashCode(); } @Override - public int compareTo(unlock_result other) { + public int compareTo(compact_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO2()) { - lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -166225,24 +169817,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("unlock_result("); + StringBuilder sb = new StringBuilder("compact_result("); boolean first = true; - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { - sb.append("null"); - } else { - sb.append(this.o2); - } - first = false; sb.append(")"); return sb.toString(); } @@ -166268,15 +169845,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class unlock_resultStandardSchemeFactory implements SchemeFactory { - public unlock_resultStandardScheme getScheme() { - return new unlock_resultStandardScheme(); + private static class compact_resultStandardSchemeFactory implements SchemeFactory { + public compact_resultStandardScheme getScheme() { + return new compact_resultStandardScheme(); } } - private static class unlock_resultStandardScheme extends StandardScheme { + private static class compact_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, unlock_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -166286,24 +169863,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, unlock_result struc break; } switch (schemeField.id) { - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchLockException(); - struct.o1.read(iprot); - 
struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new TxnOpenException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -166313,84 +169872,49 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, unlock_result struc struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, unlock_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class unlock_resultTupleSchemeFactory implements SchemeFactory { - public unlock_resultTupleScheme getScheme() { - return new unlock_resultTupleScheme(); + private static class compact_resultTupleSchemeFactory implements SchemeFactory { + public compact_resultTupleScheme getScheme() { + return new compact_resultTupleScheme(); } } - private static class unlock_resultTupleScheme extends TupleScheme { + private static class compact_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, unlock_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) 
prot; - BitSet optionals = new BitSet(); - if (struct.isSetO1()) { - optionals.set(0); - } - if (struct.isSetO2()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetO1()) { - struct.o1.write(oprot); - } - if (struct.isSetO2()) { - struct.o2.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, unlock_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.o1 = new NoSuchLockException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(1)) { - struct.o2 = new TxnOpenException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } } } } - public static class show_locks_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_locks_args"); + public static class show_compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_locks_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_locks_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory()); } - private ShowLocksRequest rqst; 
// required + private ShowCompactRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -166455,16 +169979,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowLocksRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_locks_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap); } - public show_locks_args() { + public show_compact_args() { } - public show_locks_args( - ShowLocksRequest rqst) + public show_compact_args( + ShowCompactRequest rqst) { this(); this.rqst = rqst; @@ -166473,14 +169997,14 @@ public show_locks_args( /** * Performs a deep copy on other. 
*/ - public show_locks_args(show_locks_args other) { + public show_compact_args(show_compact_args other) { if (other.isSetRqst()) { - this.rqst = new ShowLocksRequest(other.rqst); + this.rqst = new ShowCompactRequest(other.rqst); } } - public show_locks_args deepCopy() { - return new show_locks_args(this); + public show_compact_args deepCopy() { + return new show_compact_args(this); } @Override @@ -166488,11 +170012,11 @@ public void clear() { this.rqst = null; } - public ShowLocksRequest getRqst() { + public ShowCompactRequest getRqst() { return this.rqst; } - public void setRqst(ShowLocksRequest rqst) { + public void setRqst(ShowCompactRequest rqst) { this.rqst = rqst; } @@ -166517,7 +170041,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((ShowLocksRequest)value); + setRqst((ShowCompactRequest)value); } break; @@ -166550,12 +170074,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_locks_args) - return this.equals((show_locks_args)that); + if (that instanceof show_compact_args) + return this.equals((show_compact_args)that); return false; } - public boolean equals(show_locks_args that) { + public boolean equals(show_compact_args that) { if (that == null) return false; @@ -166584,7 +170108,7 @@ public int hashCode() { } @Override - public int compareTo(show_locks_args other) { + public int compareTo(show_compact_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -166618,7 +170142,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_locks_args("); + StringBuilder sb = new StringBuilder("show_compact_args("); boolean first = true; sb.append("rqst:"); @@ -166656,15 +170180,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_locks_argsStandardSchemeFactory implements SchemeFactory { - public show_locks_argsStandardScheme getScheme() { - return new show_locks_argsStandardScheme(); + private static class show_compact_argsStandardSchemeFactory implements SchemeFactory { + public show_compact_argsStandardScheme getScheme() { + return new show_compact_argsStandardScheme(); } } - private static class show_locks_argsStandardScheme extends StandardScheme { + private static class show_compact_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -166676,7 +170200,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_args str switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new ShowLocksRequest(); + struct.rqst = new ShowCompactRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -166692,7 +170216,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ 
-166707,16 +170231,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_args st } - private static class show_locks_argsTupleSchemeFactory implements SchemeFactory { - public show_locks_argsTupleScheme getScheme() { - return new show_locks_argsTupleScheme(); + private static class show_compact_argsTupleSchemeFactory implements SchemeFactory { + public show_compact_argsTupleScheme getScheme() { + return new show_compact_argsTupleScheme(); } } - private static class show_locks_argsTupleScheme extends TupleScheme { + private static class show_compact_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -166729,11 +170253,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_args str } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new ShowLocksRequest(); + struct.rqst = new ShowCompactRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -166742,18 +170266,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_args stru } - public static class show_locks_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_locks_result"); + public static class 
show_compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_locks_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_locks_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory()); } - private ShowLocksResponse success; // required + private ShowCompactResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -166818,16 +170342,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowLocksResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_locks_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap); } - public show_locks_result() { + public show_compact_result() { } - public show_locks_result( - ShowLocksResponse success) + public show_compact_result( + ShowCompactResponse success) { this(); this.success = success; @@ -166836,14 +170360,14 @@ public show_locks_result( /** * Performs a deep copy on other. 
*/ - public show_locks_result(show_locks_result other) { + public show_compact_result(show_compact_result other) { if (other.isSetSuccess()) { - this.success = new ShowLocksResponse(other.success); + this.success = new ShowCompactResponse(other.success); } } - public show_locks_result deepCopy() { - return new show_locks_result(this); + public show_compact_result deepCopy() { + return new show_compact_result(this); } @Override @@ -166851,11 +170375,11 @@ public void clear() { this.success = null; } - public ShowLocksResponse getSuccess() { + public ShowCompactResponse getSuccess() { return this.success; } - public void setSuccess(ShowLocksResponse success) { + public void setSuccess(ShowCompactResponse success) { this.success = success; } @@ -166880,7 +170404,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((ShowLocksResponse)value); + setSuccess((ShowCompactResponse)value); } break; @@ -166913,12 +170437,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_locks_result) - return this.equals((show_locks_result)that); + if (that instanceof show_compact_result) + return this.equals((show_compact_result)that); return false; } - public boolean equals(show_locks_result that) { + public boolean equals(show_compact_result that) { if (that == null) return false; @@ -166947,7 +170471,7 @@ public int hashCode() { } @Override - public int compareTo(show_locks_result other) { + public int compareTo(show_compact_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -166981,7 +170505,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_locks_result("); + StringBuilder sb = new StringBuilder("show_compact_result("); boolean first = true; sb.append("success:"); @@ -167019,15 +170543,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_locks_resultStandardSchemeFactory implements SchemeFactory { - public show_locks_resultStandardScheme getScheme() { - return new show_locks_resultStandardScheme(); + private static class show_compact_resultStandardSchemeFactory implements SchemeFactory { + public show_compact_resultStandardScheme getScheme() { + return new show_compact_resultStandardScheme(); } } - private static class show_locks_resultStandardScheme extends StandardScheme { + private static class show_compact_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -167039,7 +170563,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_result s switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ShowLocksResponse(); + struct.success = new ShowCompactResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -167055,7 +170579,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_locks_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException { struct.validate(); 
oprot.writeStructBegin(STRUCT_DESC); @@ -167070,16 +170594,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, show_locks_result } - private static class show_locks_resultTupleSchemeFactory implements SchemeFactory { - public show_locks_resultTupleScheme getScheme() { - return new show_locks_resultTupleScheme(); + private static class show_compact_resultTupleSchemeFactory implements SchemeFactory { + public show_compact_resultTupleScheme getScheme() { + return new show_compact_resultTupleScheme(); } } - private static class show_locks_resultTupleScheme extends TupleScheme { + private static class show_compact_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -167092,11 +170616,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, show_locks_result s } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new ShowLocksResponse(); + struct.success = new ShowCompactResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -167105,22 +170629,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_locks_result st } - public static class heartbeat_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("heartbeat_args"); + public static class add_dynamic_partitions_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args"); - private static final org.apache.thrift.protocol.TField IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("ids", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new heartbeat_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new heartbeat_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory()); } - private HeartbeatRequest ids; // required + private AddDynamicPartitions rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - IDS((short)1, "ids"); + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -167135,8 +170659,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // IDS - return IDS; + case 1: // RQST + return RQST; default: return null; } @@ -167180,70 +170704,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.IDS, new org.apache.thrift.meta_data.FieldMetaData("ids", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatRequest.class))); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap); } - public heartbeat_args() { + public add_dynamic_partitions_args() { } - public heartbeat_args( - HeartbeatRequest ids) + public add_dynamic_partitions_args( + AddDynamicPartitions rqst) { this(); - this.ids = ids; + this.rqst = rqst; } /** * Performs a deep copy on other. 
*/ - public heartbeat_args(heartbeat_args other) { - if (other.isSetIds()) { - this.ids = new HeartbeatRequest(other.ids); + public add_dynamic_partitions_args(add_dynamic_partitions_args other) { + if (other.isSetRqst()) { + this.rqst = new AddDynamicPartitions(other.rqst); } } - public heartbeat_args deepCopy() { - return new heartbeat_args(this); + public add_dynamic_partitions_args deepCopy() { + return new add_dynamic_partitions_args(this); } @Override public void clear() { - this.ids = null; + this.rqst = null; } - public HeartbeatRequest getIds() { - return this.ids; + public AddDynamicPartitions getRqst() { + return this.rqst; } - public void setIds(HeartbeatRequest ids) { - this.ids = ids; + public void setRqst(AddDynamicPartitions rqst) { + this.rqst = rqst; } - public void unsetIds() { - this.ids = null; + public void unsetRqst() { + this.rqst = null; } - /** Returns true if field ids is set (has been assigned a value) and false otherwise */ - public boolean isSetIds() { - return this.ids != null; + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; } - public void setIdsIsSet(boolean value) { + public void setRqstIsSet(boolean value) { if (!value) { - this.ids = null; + this.rqst = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case IDS: + case RQST: if (value == null) { - unsetIds(); + unsetRqst(); } else { - setIds((HeartbeatRequest)value); + setRqst((AddDynamicPartitions)value); } break; @@ -167252,8 +170776,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case IDS: - return getIds(); + case RQST: + return getRqst(); } throw new IllegalStateException(); @@ -167266,8 +170790,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case IDS: - return isSetIds(); + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } 
@@ -167276,21 +170800,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof heartbeat_args) - return this.equals((heartbeat_args)that); + if (that instanceof add_dynamic_partitions_args) + return this.equals((add_dynamic_partitions_args)that); return false; } - public boolean equals(heartbeat_args that) { + public boolean equals(add_dynamic_partitions_args that) { if (that == null) return false; - boolean this_present_ids = true && this.isSetIds(); - boolean that_present_ids = true && that.isSetIds(); - if (this_present_ids || that_present_ids) { - if (!(this_present_ids && that_present_ids)) + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) return false; - if (!this.ids.equals(that.ids)) + if (!this.rqst.equals(that.rqst)) return false; } @@ -167301,28 +170825,28 @@ public boolean equals(heartbeat_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_ids = true && (isSetIds()); - list.add(present_ids); - if (present_ids) - list.add(ids); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); return list.hashCode(); } @Override - public int compareTo(heartbeat_args other) { + public int compareTo(add_dynamic_partitions_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetIds()).compareTo(other.isSetIds()); + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); if (lastComparison != 0) { return lastComparison; } - if (isSetIds()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ids, other.ids); + if (isSetRqst()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); if (lastComparison != 0) { return lastComparison; } @@ -167344,14 +170868,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("heartbeat_args("); + StringBuilder sb = new StringBuilder("add_dynamic_partitions_args("); boolean first = true; - sb.append("ids:"); - if (this.ids == null) { + sb.append("rqst:"); + if (this.rqst == null) { sb.append("null"); } else { - sb.append(this.ids); + sb.append(this.rqst); } first = false; sb.append(")"); @@ -167361,8 +170885,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (ids != null) { - ids.validate(); + if (rqst != null) { + rqst.validate(); } } @@ -167382,15 +170906,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class heartbeat_argsStandardSchemeFactory implements SchemeFactory { - public heartbeat_argsStandardScheme getScheme() { - return new heartbeat_argsStandardScheme(); + private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsStandardScheme getScheme() { + return new add_dynamic_partitions_argsStandardScheme(); } } - private static class heartbeat_argsStandardScheme extends StandardScheme { + private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -167400,11 +170924,11 @@ public void read(org.apache.thrift.protocol.TProtocol 
iprot, heartbeat_args stru break; } switch (schemeField.id) { - case 1: // IDS + case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.ids = new HeartbeatRequest(); - struct.ids.read(iprot); - struct.setIdsIsSet(true); + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -167418,13 +170942,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_args stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.ids != null) { - oprot.writeFieldBegin(IDS_FIELD_DESC); - struct.ids.write(oprot); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -167433,63 +170957,60 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_args str } - private static class heartbeat_argsTupleSchemeFactory implements SchemeFactory { - public heartbeat_argsTupleScheme getScheme() { - return new heartbeat_argsTupleScheme(); + private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_argsTupleScheme getScheme() { + return new add_dynamic_partitions_argsTupleScheme(); } } - private static class heartbeat_argsTupleScheme extends TupleScheme { + private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, 
add_dynamic_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetIds()) { + if (struct.isSetRqst()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetIds()) { - struct.ids.write(oprot); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.ids = new HeartbeatRequest(); - struct.ids.read(iprot); - struct.setIdsIsSet(true); + struct.rqst = new AddDynamicPartitions(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } } } } - public static class heartbeat_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_result"); + public static class add_dynamic_partitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result"); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); - private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); private 
static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new heartbeat_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new heartbeat_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new add_dynamic_partitions_resultTupleSchemeFactory()); } - private NoSuchLockException o1; // required - private NoSuchTxnException o2; // required - private TxnAbortedException o3; // required + private NoSuchTxnException o1; // required + private TxnAbortedException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { O1((short)1, "o1"), - O2((short)2, "o2"), - O3((short)3, "o3"); + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -167508,8 +171029,6 @@ public static _Fields findByThriftId(int fieldId) { return O1; case 2: // O2 return O2; - case 3: // O3 - return O3; default: return null; } @@ -167557,57 +171076,49 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap); 
} - public heartbeat_result() { + public add_dynamic_partitions_result() { } - public heartbeat_result( - NoSuchLockException o1, - NoSuchTxnException o2, - TxnAbortedException o3) + public add_dynamic_partitions_result( + NoSuchTxnException o1, + TxnAbortedException o2) { this(); this.o1 = o1; this.o2 = o2; - this.o3 = o3; } /** * Performs a deep copy on other. */ - public heartbeat_result(heartbeat_result other) { + public add_dynamic_partitions_result(add_dynamic_partitions_result other) { if (other.isSetO1()) { - this.o1 = new NoSuchLockException(other.o1); + this.o1 = new NoSuchTxnException(other.o1); } if (other.isSetO2()) { - this.o2 = new NoSuchTxnException(other.o2); - } - if (other.isSetO3()) { - this.o3 = new TxnAbortedException(other.o3); + this.o2 = new TxnAbortedException(other.o2); } } - public heartbeat_result deepCopy() { - return new heartbeat_result(this); + public add_dynamic_partitions_result deepCopy() { + return new add_dynamic_partitions_result(this); } @Override public void clear() { this.o1 = null; this.o2 = null; - this.o3 = null; } - public NoSuchLockException getO1() { + public NoSuchTxnException getO1() { return this.o1; } - public void setO1(NoSuchLockException o1) { + public void setO1(NoSuchTxnException o1) { this.o1 = o1; } @@ -167626,11 +171137,11 @@ public void setO1IsSet(boolean value) { } } - public NoSuchTxnException getO2() { + public TxnAbortedException getO2() { return this.o2; } - public void setO2(NoSuchTxnException o2) { + public void setO2(TxnAbortedException o2) { this.o2 = o2; } @@ -167649,36 +171160,13 @@ public void setO2IsSet(boolean value) { } } - public TxnAbortedException getO3() { - return this.o3; - } - - public void setO3(TxnAbortedException o3) { - this.o3 = o3; - } - - public void unsetO3() { - this.o3 = null; - } - - /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ - public boolean isSetO3() { - return this.o3 != null; - } - - public void setO3IsSet(boolean value) { - 
if (!value) { - this.o3 = null; - } - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case O1: if (value == null) { unsetO1(); } else { - setO1((NoSuchLockException)value); + setO1((NoSuchTxnException)value); } break; @@ -167686,15 +171174,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((NoSuchTxnException)value); - } - break; - - case O3: - if (value == null) { - unsetO3(); - } else { - setO3((TxnAbortedException)value); + setO2((TxnAbortedException)value); } break; @@ -167709,9 +171189,6 @@ public Object getFieldValue(_Fields field) { case O2: return getO2(); - case O3: - return getO3(); - } throw new IllegalStateException(); } @@ -167727,8 +171204,6 @@ public boolean isSet(_Fields field) { return isSetO1(); case O2: return isSetO2(); - case O3: - return isSetO3(); } throw new IllegalStateException(); } @@ -167737,12 +171212,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof heartbeat_result) - return this.equals((heartbeat_result)that); + if (that instanceof add_dynamic_partitions_result) + return this.equals((add_dynamic_partitions_result)that); return false; } - public boolean equals(heartbeat_result that) { + public boolean equals(add_dynamic_partitions_result that) { if (that == null) return false; @@ -167764,15 +171239,6 @@ public boolean equals(heartbeat_result that) { return false; } - boolean this_present_o3 = true && this.isSetO3(); - boolean that_present_o3 = true && that.isSetO3(); - if (this_present_o3 || that_present_o3) { - if (!(this_present_o3 && that_present_o3)) - return false; - if (!this.o3.equals(that.o3)) - return false; - } - return true; } @@ -167790,16 +171256,11 @@ public int hashCode() { if (present_o2) list.add(o2); - boolean present_o3 = true && (isSetO3()); - list.add(present_o3); - if (present_o3) - list.add(o3); - return list.hashCode(); } @Override - 
public int compareTo(heartbeat_result other) { + public int compareTo(add_dynamic_partitions_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -167826,16 +171287,6 @@ public int compareTo(heartbeat_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO3()).compareTo(other.isSetO3()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO3()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, other.o3); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -167853,7 +171304,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("heartbeat_result("); + StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); boolean first = true; sb.append("o1:"); @@ -167871,14 +171322,6 @@ public String toString() { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; sb.append(")"); return sb.toString(); } @@ -167904,15 +171347,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class heartbeat_resultStandardSchemeFactory implements SchemeFactory { - public heartbeat_resultStandardScheme getScheme() { - return new heartbeat_resultStandardScheme(); + private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_resultStandardScheme getScheme() { + return new add_dynamic_partitions_resultStandardScheme(); } } - private static class heartbeat_resultStandardScheme extends StandardScheme { + private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, 
heartbeat_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -167924,7 +171367,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_result st switch (schemeField.id) { case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchLockException(); + struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -167933,22 +171376,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_result st break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new NoSuchTxnException(); + struct.o2 = new TxnAbortedException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // O3 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new TxnAbortedException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -167958,7 +171392,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_result st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -167972,27 +171406,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_result s struct.o2.write(oprot); 
oprot.writeFieldEnd(); } - if (struct.o3 != null) { - oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class heartbeat_resultTupleSchemeFactory implements SchemeFactory { - public heartbeat_resultTupleScheme getScheme() { - return new heartbeat_resultTupleScheme(); + private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory { + public add_dynamic_partitions_resultTupleScheme getScheme() { + return new add_dynamic_partitions_resultTupleScheme(); } } - private static class heartbeat_resultTupleScheme extends TupleScheme { + private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetO1()) { @@ -168001,61 +171430,50 @@ public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_result st if (struct.isSetO2()) { optionals.set(1); } - if (struct.isSetO3()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetO1()) { struct.o1.write(oprot); } if (struct.isSetO2()) { struct.o2.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - 
struct.o1 = new NoSuchLockException(); + struct.o1 = new NoSuchTxnException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(1)) { - struct.o2 = new NoSuchTxnException(); + struct.o2 = new TxnAbortedException(); struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(2)) { - struct.o3 = new TxnAbortedException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } } } } - public static class heartbeat_txn_range_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_txn_range_args"); + public static class get_next_notification_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args"); - private static final org.apache.thrift.protocol.TField TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("txns", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new heartbeat_txn_range_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new heartbeat_txn_range_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory()); } - private HeartbeatTxnRangeRequest txns; // required + private NotificationEventRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TXNS((short)1, "txns"); + RQST((short)1, "rqst"); private static final Map byName = new HashMap(); @@ -168070,8 +171488,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // TXNS - return TXNS; + case 1: // RQST + return RQST; default: return null; } @@ -168115,70 +171533,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.TXNS, new org.apache.thrift.meta_data.FieldMetaData("txns", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatTxnRangeRequest.class))); + tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_txn_range_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap); } - public heartbeat_txn_range_args() { + public get_next_notification_args() { } - public heartbeat_txn_range_args( - HeartbeatTxnRangeRequest txns) + public get_next_notification_args( + NotificationEventRequest rqst) { this(); - this.txns = txns; + this.rqst = rqst; } /** * Performs a deep copy on other. 
*/ - public heartbeat_txn_range_args(heartbeat_txn_range_args other) { - if (other.isSetTxns()) { - this.txns = new HeartbeatTxnRangeRequest(other.txns); + public get_next_notification_args(get_next_notification_args other) { + if (other.isSetRqst()) { + this.rqst = new NotificationEventRequest(other.rqst); } } - public heartbeat_txn_range_args deepCopy() { - return new heartbeat_txn_range_args(this); + public get_next_notification_args deepCopy() { + return new get_next_notification_args(this); } @Override public void clear() { - this.txns = null; + this.rqst = null; } - public HeartbeatTxnRangeRequest getTxns() { - return this.txns; + public NotificationEventRequest getRqst() { + return this.rqst; } - public void setTxns(HeartbeatTxnRangeRequest txns) { - this.txns = txns; + public void setRqst(NotificationEventRequest rqst) { + this.rqst = rqst; } - public void unsetTxns() { - this.txns = null; + public void unsetRqst() { + this.rqst = null; } - /** Returns true if field txns is set (has been assigned a value) and false otherwise */ - public boolean isSetTxns() { - return this.txns != null; + /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ + public boolean isSetRqst() { + return this.rqst != null; } - public void setTxnsIsSet(boolean value) { + public void setRqstIsSet(boolean value) { if (!value) { - this.txns = null; + this.rqst = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case TXNS: + case RQST: if (value == null) { - unsetTxns(); + unsetRqst(); } else { - setTxns((HeartbeatTxnRangeRequest)value); + setRqst((NotificationEventRequest)value); } break; @@ -168187,8 +171605,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case TXNS: - return getTxns(); + case RQST: + return getRqst(); } throw new IllegalStateException(); @@ -168201,8 +171619,8 @@ public boolean isSet(_Fields field) { } switch (field) { - 
case TXNS: - return isSetTxns(); + case RQST: + return isSetRqst(); } throw new IllegalStateException(); } @@ -168211,21 +171629,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof heartbeat_txn_range_args) - return this.equals((heartbeat_txn_range_args)that); + if (that instanceof get_next_notification_args) + return this.equals((get_next_notification_args)that); return false; } - public boolean equals(heartbeat_txn_range_args that) { + public boolean equals(get_next_notification_args that) { if (that == null) return false; - boolean this_present_txns = true && this.isSetTxns(); - boolean that_present_txns = true && that.isSetTxns(); - if (this_present_txns || that_present_txns) { - if (!(this_present_txns && that_present_txns)) + boolean this_present_rqst = true && this.isSetRqst(); + boolean that_present_rqst = true && that.isSetRqst(); + if (this_present_rqst || that_present_rqst) { + if (!(this_present_rqst && that_present_rqst)) return false; - if (!this.txns.equals(that.txns)) + if (!this.rqst.equals(that.rqst)) return false; } @@ -168236,28 +171654,28 @@ public boolean equals(heartbeat_txn_range_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_txns = true && (isSetTxns()); - list.add(present_txns); - if (present_txns) - list.add(txns); + boolean present_rqst = true && (isSetRqst()); + list.add(present_rqst); + if (present_rqst) + list.add(rqst); return list.hashCode(); } @Override - public int compareTo(heartbeat_txn_range_args other) { + public int compareTo(get_next_notification_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetTxns()).compareTo(other.isSetTxns()); + lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); if (lastComparison != 0) { return lastComparison; } - if 
(isSetTxns()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txns, other.txns); + if (isSetRqst()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); if (lastComparison != 0) { return lastComparison; } @@ -168279,14 +171697,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("heartbeat_txn_range_args("); + StringBuilder sb = new StringBuilder("get_next_notification_args("); boolean first = true; - sb.append("txns:"); - if (this.txns == null) { + sb.append("rqst:"); + if (this.rqst == null) { sb.append("null"); } else { - sb.append(this.txns); + sb.append(this.rqst); } first = false; sb.append(")"); @@ -168296,8 +171714,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (txns != null) { - txns.validate(); + if (rqst != null) { + rqst.validate(); } } @@ -168317,15 +171735,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class heartbeat_txn_range_argsStandardSchemeFactory implements SchemeFactory { - public heartbeat_txn_range_argsStandardScheme getScheme() { - return new heartbeat_txn_range_argsStandardScheme(); + private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory { + public get_next_notification_argsStandardScheme getScheme() { + return new get_next_notification_argsStandardScheme(); } } - private static class heartbeat_txn_range_argsStandardScheme extends StandardScheme { + private static class get_next_notification_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws 
org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -168335,11 +171753,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range break; } switch (schemeField.id) { - case 1: // TXNS + case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.txns = new HeartbeatTxnRangeRequest(); - struct.txns.read(iprot); - struct.setTxnsIsSet(true); + struct.rqst = new NotificationEventRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -168353,13 +171771,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.txns != null) { - oprot.writeFieldBegin(TXNS_FIELD_DESC); - struct.txns.write(oprot); + if (struct.rqst != null) { + oprot.writeFieldBegin(RQST_FIELD_DESC); + struct.rqst.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -168368,53 +171786,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_rang } - private static class heartbeat_txn_range_argsTupleSchemeFactory implements SchemeFactory { - public heartbeat_txn_range_argsTupleScheme getScheme() { - return new heartbeat_txn_range_argsTupleScheme(); + private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory { + public get_next_notification_argsTupleScheme getScheme() { + return new get_next_notification_argsTupleScheme(); } } - private static class heartbeat_txn_range_argsTupleScheme extends TupleScheme { + private static class 
get_next_notification_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetTxns()) { + if (struct.isSetRqst()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetTxns()) { - struct.txns.write(oprot); + if (struct.isSetRqst()) { + struct.rqst.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.txns = new HeartbeatTxnRangeRequest(); - struct.txns.read(iprot); - struct.setTxnsIsSet(true); + struct.rqst = new NotificationEventRequest(); + struct.rqst.read(iprot); + struct.setRqstIsSet(true); } } } } - public static class heartbeat_txn_range_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_txn_range_result"); + public static class get_next_notification_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, 
SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new heartbeat_txn_range_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new heartbeat_txn_range_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory()); } - private HeartbeatTxnRangeResponse success; // required + private NotificationEventResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -168479,16 +171897,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatTxnRangeResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_txn_range_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap); } - public heartbeat_txn_range_result() { + public get_next_notification_result() { } - public heartbeat_txn_range_result( - HeartbeatTxnRangeResponse success) + public get_next_notification_result( + NotificationEventResponse success) { this(); this.success = success; @@ -168497,14 +171915,14 @@ public heartbeat_txn_range_result( /** * Performs a deep copy on other. 
*/ - public heartbeat_txn_range_result(heartbeat_txn_range_result other) { + public get_next_notification_result(get_next_notification_result other) { if (other.isSetSuccess()) { - this.success = new HeartbeatTxnRangeResponse(other.success); + this.success = new NotificationEventResponse(other.success); } } - public heartbeat_txn_range_result deepCopy() { - return new heartbeat_txn_range_result(this); + public get_next_notification_result deepCopy() { + return new get_next_notification_result(this); } @Override @@ -168512,11 +171930,11 @@ public void clear() { this.success = null; } - public HeartbeatTxnRangeResponse getSuccess() { + public NotificationEventResponse getSuccess() { return this.success; } - public void setSuccess(HeartbeatTxnRangeResponse success) { + public void setSuccess(NotificationEventResponse success) { this.success = success; } @@ -168541,7 +171959,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((HeartbeatTxnRangeResponse)value); + setSuccess((NotificationEventResponse)value); } break; @@ -168574,12 +171992,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof heartbeat_txn_range_result) - return this.equals((heartbeat_txn_range_result)that); + if (that instanceof get_next_notification_result) + return this.equals((get_next_notification_result)that); return false; } - public boolean equals(heartbeat_txn_range_result that) { + public boolean equals(get_next_notification_result that) { if (that == null) return false; @@ -168608,7 +172026,7 @@ public int hashCode() { } @Override - public int compareTo(heartbeat_txn_range_result other) { + public int compareTo(get_next_notification_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -168642,7 +172060,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) 
throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("heartbeat_txn_range_result("); + StringBuilder sb = new StringBuilder("get_next_notification_result("); boolean first = true; sb.append("success:"); @@ -168680,15 +172098,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class heartbeat_txn_range_resultStandardSchemeFactory implements SchemeFactory { - public heartbeat_txn_range_resultStandardScheme getScheme() { - return new heartbeat_txn_range_resultStandardScheme(); + private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory { + public get_next_notification_resultStandardScheme getScheme() { + return new get_next_notification_resultStandardScheme(); } } - private static class heartbeat_txn_range_resultStandardScheme extends StandardScheme { + private static class get_next_notification_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -168700,7 +172118,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new HeartbeatTxnRangeResponse(); + struct.success = new NotificationEventResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -168716,7 +172134,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_txn_range struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { 
+ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -168731,16 +172149,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_txn_rang } - private static class heartbeat_txn_range_resultTupleSchemeFactory implements SchemeFactory { - public heartbeat_txn_range_resultTupleScheme getScheme() { - return new heartbeat_txn_range_resultTupleScheme(); + private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory { + public get_next_notification_resultTupleScheme getScheme() { + return new get_next_notification_resultTupleScheme(); } } - private static class heartbeat_txn_range_resultTupleScheme extends TupleScheme { + private static class get_next_notification_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -168753,11 +172171,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new HeartbeatTxnRangeResponse(); + struct.success = new NotificationEventResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -168766,22 +172184,20 @@ public 
void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_txn_range_ } - public static class compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_args"); + public static class get_current_notificationEventId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new compact_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new compact_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory()); } - private CompactionRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); +; private static final Map byName = new HashMap(); @@ -168796,8 +172212,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; default: return null; } @@ -168836,86 +172250,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CompactionRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_args.class, metaDataMap); - } - - public compact_args() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap); } - public compact_args( - CompactionRequest rqst) - { - this(); - this.rqst = rqst; + public get_current_notificationEventId_args() { } /** * Performs a deep copy on other. 
*/ - public compact_args(compact_args other) { - if (other.isSetRqst()) { - this.rqst = new CompactionRequest(other.rqst); - } + public get_current_notificationEventId_args(get_current_notificationEventId_args other) { } - public compact_args deepCopy() { - return new compact_args(this); + public get_current_notificationEventId_args deepCopy() { + return new get_current_notificationEventId_args(this); } @Override public void clear() { - this.rqst = null; - } - - public CompactionRequest getRqst() { - return this.rqst; - } - - public void setRqst(CompactionRequest rqst) { - this.rqst = rqst; - } - - public void unsetRqst() { - this.rqst = null; - } - - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; - } - - public void setRqstIsSet(boolean value) { - if (!value) { - this.rqst = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: - if (value == null) { - unsetRqst(); - } else { - setRqst((CompactionRequest)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); - } throw new IllegalStateException(); } @@ -168927,8 +172292,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); } throw new IllegalStateException(); } @@ -168937,24 +172300,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof compact_args) - return this.equals((compact_args)that); + if (that instanceof get_current_notificationEventId_args) + return this.equals((get_current_notificationEventId_args)that); return false; } - public boolean equals(compact_args that) { + public boolean equals(get_current_notificationEventId_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - 
if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) - return false; - if (!this.rqst.equals(that.rqst)) - return false; - } - return true; } @@ -168962,32 +172316,17 @@ public boolean equals(compact_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); - return list.hashCode(); } @Override - public int compareTo(compact_args other) { + public int compareTo(get_current_notificationEventId_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -169005,16 +172344,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("compact_args("); + StringBuilder sb = new StringBuilder("get_current_notificationEventId_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { - sb.append("null"); - } else { - sb.append(this.rqst); - } - first = false; sb.append(")"); return sb.toString(); } @@ -169022,9 +172354,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -169043,15 +172372,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class compact_argsStandardSchemeFactory implements SchemeFactory { - public compact_argsStandardScheme getScheme() { - return new compact_argsStandardScheme(); + private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_argsStandardScheme getScheme() { + return new get_current_notificationEventId_argsStandardScheme(); } } - private static class compact_argsStandardScheme extends StandardScheme { + private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -169061,15 +172390,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct break; } switch (schemeField.id) { - case 1: // RQST - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new 
CompactionRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -169079,70 +172399,53 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_args struct struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class compact_argsTupleSchemeFactory implements SchemeFactory { - public compact_argsTupleScheme getScheme() { - return new compact_argsTupleScheme(); + private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_argsTupleScheme getScheme() { + return new get_current_notificationEventId_argsTupleScheme(); } } - private static class compact_argsTupleScheme extends TupleScheme { + private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); - } } @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.rqst = new CompactionRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } } } } - public static class compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("compact_result"); + public static class get_current_notificationEventId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new compact_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new compact_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory()); } + private CurrentNotificationEventId success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -169157,6 +172460,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -169195,37 +172500,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(compact_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap); } - public compact_result() { + public get_current_notificationEventId_result() { + } + + public get_current_notificationEventId_result( + CurrentNotificationEventId success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. 
*/ - public compact_result(compact_result other) { + public get_current_notificationEventId_result(get_current_notificationEventId_result other) { + if (other.isSetSuccess()) { + this.success = new CurrentNotificationEventId(other.success); + } } - public compact_result deepCopy() { - return new compact_result(this); + public get_current_notificationEventId_result deepCopy() { + return new get_current_notificationEventId_result(this); } @Override public void clear() { + this.success = null; + } + + public CurrentNotificationEventId getSuccess() { + return this.success; + } + + public void setSuccess(CurrentNotificationEventId success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((CurrentNotificationEventId)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + } throw new IllegalStateException(); } @@ -169237,6 +172591,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -169245,15 +172601,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof compact_result) - return this.equals((compact_result)that); + if (that instanceof get_current_notificationEventId_result) + return this.equals((get_current_notificationEventId_result)that); return false; } - public boolean equals(compact_result that) { + public boolean equals(get_current_notificationEventId_result that) { if (that 
== null) return false; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -169261,17 +172626,32 @@ public boolean equals(compact_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + return list.hashCode(); } @Override - public int compareTo(compact_result other) { + public int compareTo(get_current_notificationEventId_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -169289,9 +172669,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("compact_result("); + StringBuilder sb = new StringBuilder("get_current_notificationEventId_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -169299,6 +172686,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -169317,15 +172707,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class compact_resultStandardSchemeFactory implements SchemeFactory { - public compact_resultStandardScheme getScheme() { - return new compact_resultStandardScheme(); + private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_resultStandardScheme getScheme() { + return new get_current_notificationEventId_resultStandardScheme(); } } - private static class compact_resultStandardScheme extends StandardScheme { + private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -169335,6 +172725,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new CurrentNotificationEventId(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -169344,49 +172743,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, compact_result stru struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class compact_resultTupleSchemeFactory implements SchemeFactory { - public compact_resultTupleScheme getScheme() { - return new compact_resultTupleScheme(); + private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory { + public get_current_notificationEventId_resultTupleScheme getScheme() { + return new get_current_notificationEventId_resultTupleScheme(); } } - private static class compact_resultTupleScheme extends TupleScheme { + private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + 
oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new CurrentNotificationEventId(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } } } } - public static class show_compact_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_args"); + public static class fire_listener_event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args"); private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_compact_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_compact_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory()); } - private ShowCompactRequest rqst; // required + private FireEventRequest rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -169451,16 +172869,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap); } - public show_compact_args() { + public fire_listener_event_args() { } - public show_compact_args( - ShowCompactRequest rqst) + public fire_listener_event_args( + FireEventRequest rqst) { this(); this.rqst = rqst; @@ -169469,14 +172887,14 @@ public show_compact_args( /** * Performs a deep copy on other. 
*/ - public show_compact_args(show_compact_args other) { + public fire_listener_event_args(fire_listener_event_args other) { if (other.isSetRqst()) { - this.rqst = new ShowCompactRequest(other.rqst); + this.rqst = new FireEventRequest(other.rqst); } } - public show_compact_args deepCopy() { - return new show_compact_args(this); + public fire_listener_event_args deepCopy() { + return new fire_listener_event_args(this); } @Override @@ -169484,11 +172902,11 @@ public void clear() { this.rqst = null; } - public ShowCompactRequest getRqst() { + public FireEventRequest getRqst() { return this.rqst; } - public void setRqst(ShowCompactRequest rqst) { + public void setRqst(FireEventRequest rqst) { this.rqst = rqst; } @@ -169513,7 +172931,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRqst(); } else { - setRqst((ShowCompactRequest)value); + setRqst((FireEventRequest)value); } break; @@ -169546,12 +172964,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_compact_args) - return this.equals((show_compact_args)that); + if (that instanceof fire_listener_event_args) + return this.equals((fire_listener_event_args)that); return false; } - public boolean equals(show_compact_args that) { + public boolean equals(fire_listener_event_args that) { if (that == null) return false; @@ -169580,7 +172998,7 @@ public int hashCode() { } @Override - public int compareTo(show_compact_args other) { + public int compareTo(fire_listener_event_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -169614,7 +173032,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_compact_args("); + StringBuilder sb = new StringBuilder("fire_listener_event_args("); boolean first = true; sb.append("rqst:"); @@ -169652,15 +173070,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_compact_argsStandardSchemeFactory implements SchemeFactory { - public show_compact_argsStandardScheme getScheme() { - return new show_compact_argsStandardScheme(); + private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory { + public fire_listener_event_argsStandardScheme getScheme() { + return new fire_listener_event_argsStandardScheme(); } } - private static class show_compact_argsStandardScheme extends StandardScheme { + private static class fire_listener_event_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -169672,7 +173090,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s switch (schemeField.id) { case 1: // RQST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new ShowCompactRequest(); + struct.rqst = new FireEventRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } else { @@ -169688,7 +173106,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_args s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException { 
struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -169703,16 +173121,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_args } - private static class show_compact_argsTupleSchemeFactory implements SchemeFactory { - public show_compact_argsTupleScheme getScheme() { - return new show_compact_argsTupleScheme(); + private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory { + public fire_listener_event_argsTupleScheme getScheme() { + return new fire_listener_event_argsTupleScheme(); } } - private static class show_compact_argsTupleScheme extends TupleScheme { + private static class fire_listener_event_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRqst()) { @@ -169725,11 +173143,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_args s } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new ShowCompactRequest(); + struct.rqst = new FireEventRequest(); struct.rqst.read(iprot); struct.setRqstIsSet(true); } @@ -169738,18 +173156,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_args st } - public static class show_compact_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("show_compact_result"); + public static class fire_listener_event_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new show_compact_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new show_compact_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fire_listener_event_resultTupleSchemeFactory()); } - private ShowCompactResponse success; // required + private FireEventResponse success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -169814,16 +173232,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ShowCompactResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(show_compact_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap); } - public show_compact_result() { + public fire_listener_event_result() { } - public show_compact_result( - ShowCompactResponse success) + public fire_listener_event_result( + FireEventResponse success) { this(); this.success = success; @@ -169832,14 +173250,14 @@ public show_compact_result( /** * Performs a deep copy on other. 
*/ - public show_compact_result(show_compact_result other) { + public fire_listener_event_result(fire_listener_event_result other) { if (other.isSetSuccess()) { - this.success = new ShowCompactResponse(other.success); + this.success = new FireEventResponse(other.success); } } - public show_compact_result deepCopy() { - return new show_compact_result(this); + public fire_listener_event_result deepCopy() { + return new fire_listener_event_result(this); } @Override @@ -169847,11 +173265,11 @@ public void clear() { this.success = null; } - public ShowCompactResponse getSuccess() { + public FireEventResponse getSuccess() { return this.success; } - public void setSuccess(ShowCompactResponse success) { + public void setSuccess(FireEventResponse success) { this.success = success; } @@ -169876,7 +173294,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((ShowCompactResponse)value); + setSuccess((FireEventResponse)value); } break; @@ -169909,12 +173327,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof show_compact_result) - return this.equals((show_compact_result)that); + if (that instanceof fire_listener_event_result) + return this.equals((fire_listener_event_result)that); return false; } - public boolean equals(show_compact_result that) { + public boolean equals(fire_listener_event_result that) { if (that == null) return false; @@ -169943,7 +173361,7 @@ public int hashCode() { } @Override - public int compareTo(show_compact_result other) { + public int compareTo(fire_listener_event_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -169977,7 +173395,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("show_compact_result("); + StringBuilder sb = new StringBuilder("fire_listener_event_result("); boolean first = true; sb.append("success:"); @@ -170015,15 +173433,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class show_compact_resultStandardSchemeFactory implements SchemeFactory { - public show_compact_resultStandardScheme getScheme() { - return new show_compact_resultStandardScheme(); + private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { + public fire_listener_event_resultStandardScheme getScheme() { + return new fire_listener_event_resultStandardScheme(); } } - private static class show_compact_resultStandardScheme extends StandardScheme { + private static class fire_listener_event_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170035,7 +173453,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ShowCompactResponse(); + struct.success = new FireEventResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -170051,7 +173469,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, show_compact_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result 
struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -170066,16 +173484,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, show_compact_resul } - private static class show_compact_resultTupleSchemeFactory implements SchemeFactory { - public show_compact_resultTupleScheme getScheme() { - return new show_compact_resultTupleScheme(); + private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { + public fire_listener_event_resultTupleScheme getScheme() { + return new fire_listener_event_resultTupleScheme(); } } - private static class show_compact_resultTupleScheme extends TupleScheme { + private static class fire_listener_event_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -170088,11 +173506,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, show_compact_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new ShowCompactResponse(); + struct.success = new FireEventResponse(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -170101,22 +173519,20 @@ public void read(org.apache.thrift.protocol.TProtocol prot, show_compact_result } - public static class add_dynamic_partitions_args implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_args"); + public static class flushCache_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_dynamic_partitions_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new add_dynamic_partitions_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory()); } - private AddDynamicPartitions rqst; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); +; private static final Map byName = new HashMap(); @@ -170131,8 +173547,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; default: return null; } @@ -170171,86 +173585,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AddDynamicPartitions.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_args.class, metaDataMap); - } - - public add_dynamic_partitions_args() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap); } - public add_dynamic_partitions_args( - AddDynamicPartitions rqst) - { - this(); - this.rqst = rqst; + public flushCache_args() { } /** * Performs a deep copy on other. 
*/ - public add_dynamic_partitions_args(add_dynamic_partitions_args other) { - if (other.isSetRqst()) { - this.rqst = new AddDynamicPartitions(other.rqst); - } + public flushCache_args(flushCache_args other) { } - public add_dynamic_partitions_args deepCopy() { - return new add_dynamic_partitions_args(this); + public flushCache_args deepCopy() { + return new flushCache_args(this); } @Override public void clear() { - this.rqst = null; - } - - public AddDynamicPartitions getRqst() { - return this.rqst; - } - - public void setRqst(AddDynamicPartitions rqst) { - this.rqst = rqst; - } - - public void unsetRqst() { - this.rqst = null; - } - - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; - } - - public void setRqstIsSet(boolean value) { - if (!value) { - this.rqst = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: - if (value == null) { - unsetRqst(); - } else { - setRqst((AddDynamicPartitions)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); - } throw new IllegalStateException(); } @@ -170262,8 +173627,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); } throw new IllegalStateException(); } @@ -170272,24 +173635,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_dynamic_partitions_args) - return this.equals((add_dynamic_partitions_args)that); + if (that instanceof flushCache_args) + return this.equals((flushCache_args)that); return false; } - public boolean equals(add_dynamic_partitions_args that) { + public boolean equals(flushCache_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || 
that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) - return false; - if (!this.rqst.equals(that.rqst)) - return false; - } - return true; } @@ -170297,32 +173651,17 @@ public boolean equals(add_dynamic_partitions_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); - return list.hashCode(); } @Override - public int compareTo(add_dynamic_partitions_args other) { + public int compareTo(flushCache_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -170340,16 +173679,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("add_dynamic_partitions_args("); + StringBuilder sb = new StringBuilder("flushCache_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { - sb.append("null"); - } else { - sb.append(this.rqst); - } - first = false; sb.append(")"); return sb.toString(); } @@ -170357,9 +173689,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -170378,15 +173707,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_dynamic_partitions_argsStandardSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_argsStandardScheme getScheme() { - return new add_dynamic_partitions_argsStandardScheme(); + private static class flushCache_argsStandardSchemeFactory implements SchemeFactory { + public flushCache_argsStandardScheme getScheme() { + return new flushCache_argsStandardScheme(); } } - private static class add_dynamic_partitions_argsStandardScheme extends StandardScheme { + private static class flushCache_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170396,15 +173725,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti break; } switch (schemeField.id) { - case 1: // RQST - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new AddDynamicPartitions(); - 
struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -170414,75 +173734,51 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class add_dynamic_partitions_argsTupleSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_argsTupleScheme getScheme() { - return new add_dynamic_partitions_argsTupleScheme(); + private static class flushCache_argsTupleSchemeFactory implements SchemeFactory { + public flushCache_argsTupleScheme getScheme() { + return new flushCache_argsTupleScheme(); } } - private static class add_dynamic_partitions_argsTupleScheme extends TupleScheme { + private static class flushCache_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { - optionals.set(0); - } - oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, 
add_dynamic_partitions_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(1); - if (incoming.get(0)) { - struct.rqst = new AddDynamicPartitions(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); - } } } } - public static class add_dynamic_partitions_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("add_dynamic_partitions_result"); + public static class flushCache_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result"); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new add_dynamic_partitions_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new add_dynamic_partitions_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory()); } - private NoSuchTxnException o1; // required - private TxnAbortedException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - O1((short)1, "o1"), - O2((short)2, "o2"); +; private static final Map byName = new HashMap(); @@ -170497,10 +173793,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // O1 - return O1; - case 2: // O2 - return O2; default: return null; } @@ -170539,128 +173831,37 @@ public String getFieldName() { return _fieldName; } } - - // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); - tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(add_dynamic_partitions_result.class, metaDataMap); - } - - public add_dynamic_partitions_result() { + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); } - public add_dynamic_partitions_result( - NoSuchTxnException o1, - TxnAbortedException o2) - { - this(); - this.o1 = o1; - this.o2 = o2; + public flushCache_result() { } /** * Performs a deep copy on other. 
*/ - public add_dynamic_partitions_result(add_dynamic_partitions_result other) { - if (other.isSetO1()) { - this.o1 = new NoSuchTxnException(other.o1); - } - if (other.isSetO2()) { - this.o2 = new TxnAbortedException(other.o2); - } + public flushCache_result(flushCache_result other) { } - public add_dynamic_partitions_result deepCopy() { - return new add_dynamic_partitions_result(this); + public flushCache_result deepCopy() { + return new flushCache_result(this); } @Override public void clear() { - this.o1 = null; - this.o2 = null; - } - - public NoSuchTxnException getO1() { - return this.o1; - } - - public void setO1(NoSuchTxnException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ - public boolean isSetO1() { - return this.o1 != null; - } - - public void setO1IsSet(boolean value) { - if (!value) { - this.o1 = null; - } - } - - public TxnAbortedException getO2() { - return this.o2; - } - - public void setO2(TxnAbortedException o2) { - this.o2 = o2; - } - - public void unsetO2() { - this.o2 = null; - } - - /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ - public boolean isSetO2() { - return this.o2 != null; - } - - public void setO2IsSet(boolean value) { - if (!value) { - this.o2 = null; - } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((NoSuchTxnException)value); - } - break; - - case O2: - if (value == null) { - unsetO2(); - } else { - setO2((TxnAbortedException)value); - } - break; - } } public Object getFieldValue(_Fields field) { switch (field) { - case O1: - return getO1(); - - case O2: - return getO2(); - } throw new IllegalStateException(); } @@ -170672,10 +173873,6 @@ public boolean isSet(_Fields field) { } switch (field) { - case O1: - return isSetO1(); - case O2: - return isSetO2(); } throw new 
IllegalStateException(); } @@ -170684,33 +173881,15 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof add_dynamic_partitions_result) - return this.equals((add_dynamic_partitions_result)that); + if (that instanceof flushCache_result) + return this.equals((flushCache_result)that); return false; } - public boolean equals(add_dynamic_partitions_result that) { + public boolean equals(flushCache_result that) { if (that == null) return false; - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if (!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - - boolean this_present_o2 = true && this.isSetO2(); - boolean that_present_o2 = true && that.isSetO2(); - if (this_present_o2 || that_present_o2) { - if (!(this_present_o2 && that_present_o2)) - return false; - if (!this.o2.equals(that.o2)) - return false; - } - return true; } @@ -170718,47 +173897,17 @@ public boolean equals(add_dynamic_partitions_result that) { public int hashCode() { List list = new ArrayList(); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - - boolean present_o2 = true && (isSetO2()); - list.add(present_o2); - if (present_o2) - list.add(o2); - return list.hashCode(); } @Override - public int compareTo(add_dynamic_partitions_result other) { + public int compareTo(flushCache_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } - 
lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO2()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -170776,24 +173925,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("add_dynamic_partitions_result("); + StringBuilder sb = new StringBuilder("flushCache_result("); boolean first = true; - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); - sb.append("o2:"); - if (this.o2 == null) { - sb.append("null"); - } else { - sb.append(this.o2); - } - first = false; sb.append(")"); return sb.toString(); } @@ -170819,15 +173953,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class add_dynamic_partitions_resultStandardSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultStandardScheme getScheme() { - return new add_dynamic_partitions_resultStandardScheme(); + private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { + public flushCache_resultStandardScheme getScheme() { + return new flushCache_resultStandardScheme(); } } - private static class add_dynamic_partitions_resultStandardScheme extends StandardScheme { + private static class flushCache_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -170837,24 +173971,6 
@@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti break; } switch (schemeField.id) { - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -170864,88 +173980,53 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_dynamic_partiti struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o2 != null) { - oprot.writeFieldBegin(O2_FIELD_DESC); - struct.o2.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class add_dynamic_partitions_resultTupleSchemeFactory implements SchemeFactory { - public add_dynamic_partitions_resultTupleScheme getScheme() { - return new add_dynamic_partitions_resultTupleScheme(); + private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { + public flushCache_resultTupleScheme getScheme() { + return new flushCache_resultTupleScheme(); } } - private static class add_dynamic_partitions_resultTupleScheme extends TupleScheme { + 
private static class flushCache_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; - BitSet optionals = new BitSet(); - if (struct.isSetO1()) { - optionals.set(0); - } - if (struct.isSetO2()) { - optionals.set(1); - } - oprot.writeBitSet(optionals, 2); - if (struct.isSetO1()) { - struct.o1.write(oprot); - } - if (struct.isSetO2()) { - struct.o2.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, add_dynamic_partitions_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); - if (incoming.get(0)) { - struct.o1 = new NoSuchTxnException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(1)) { - struct.o2 = new TxnAbortedException(); - struct.o2.read(iprot); - struct.setO2IsSet(true); - } } } } - public static class get_next_notification_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_args"); + public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", 
org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); } - private NotificationEventRequest rqst; // required + private GetFileMetadataByExprRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -170960,8 +174041,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -171005,70 +174086,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventRequest.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", 
org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); } - public get_next_notification_args() { + public get_file_metadata_by_expr_args() { } - public get_next_notification_args( - NotificationEventRequest rqst) + public get_file_metadata_by_expr_args( + GetFileMetadataByExprRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. */ - public get_next_notification_args(get_next_notification_args other) { - if (other.isSetRqst()) { - this.rqst = new NotificationEventRequest(other.rqst); + public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataByExprRequest(other.req); } } - public get_next_notification_args deepCopy() { - return new get_next_notification_args(this); + public get_file_metadata_by_expr_args deepCopy() { + return new get_file_metadata_by_expr_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public NotificationEventRequest getRqst() { - return this.rqst; + public GetFileMetadataByExprRequest getReq() { + return this.req; } - public void setRqst(NotificationEventRequest rqst) { - this.rqst = rqst; + public void setReq(GetFileMetadataByExprRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false 
otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((NotificationEventRequest)value); + setReq((GetFileMetadataByExprRequest)value); } break; @@ -171077,8 +174158,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -171091,8 +174172,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -171101,21 +174182,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_args) - return this.equals((get_next_notification_args)that); + if (that instanceof get_file_metadata_by_expr_args) + return this.equals((get_file_metadata_by_expr_args)that); return false; } - public boolean equals(get_next_notification_args that) { + public boolean equals(get_file_metadata_by_expr_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -171126,28 +174207,28 @@ public boolean 
equals(get_next_notification_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(get_next_notification_args other) { + public int compareTo(get_file_metadata_by_expr_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -171169,14 +174250,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.req); } first = false; sb.append(")"); @@ -171186,8 +174267,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (req != null) { + req.validate(); } } @@ -171207,15 +174288,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_argsStandardSchemeFactory implements SchemeFactory { - public get_next_notification_argsStandardScheme getScheme() { - return new get_next_notification_argsStandardScheme(); + private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsStandardScheme getScheme() { + return new get_file_metadata_by_expr_argsStandardScheme(); } } - private static class get_next_notification_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -171225,11 +174306,11 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQ if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -171243,13 +174324,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -171258,53 +174339,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notificat } - private static class get_next_notification_argsTupleSchemeFactory implements SchemeFactory { - public get_next_notification_argsTupleScheme getScheme() { - return new get_next_notification_argsTupleScheme(); + private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_argsTupleScheme getScheme() { + return new get_file_metadata_by_expr_argsTupleScheme(); } } - private static class get_next_notification_argsTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args 
struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new NotificationEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new GetFileMetadataByExprRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - public static class get_next_notification_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_notification_result"); + public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_next_notification_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_next_notification_resultTupleSchemeFactory()); + 
schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); } - private NotificationEventResponse success; // required + private GetFileMetadataByExprResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -171369,16 +174450,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEventResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_notification_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); } - public get_next_notification_result() { + public get_file_metadata_by_expr_result() { } - public get_next_notification_result( - NotificationEventResponse success) + public get_file_metadata_by_expr_result( + GetFileMetadataByExprResult success) { this(); this.success = success; @@ -171387,14 +174468,14 @@ public get_next_notification_result( /** * Performs a deep copy on other. 
*/ - public get_next_notification_result(get_next_notification_result other) { + public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { if (other.isSetSuccess()) { - this.success = new NotificationEventResponse(other.success); + this.success = new GetFileMetadataByExprResult(other.success); } } - public get_next_notification_result deepCopy() { - return new get_next_notification_result(this); + public get_file_metadata_by_expr_result deepCopy() { + return new get_file_metadata_by_expr_result(this); } @Override @@ -171402,11 +174483,11 @@ public void clear() { this.success = null; } - public NotificationEventResponse getSuccess() { + public GetFileMetadataByExprResult getSuccess() { return this.success; } - public void setSuccess(NotificationEventResponse success) { + public void setSuccess(GetFileMetadataByExprResult success) { this.success = success; } @@ -171431,7 +174512,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((NotificationEventResponse)value); + setSuccess((GetFileMetadataByExprResult)value); } break; @@ -171464,12 +174545,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_next_notification_result) - return this.equals((get_next_notification_result)that); + if (that instanceof get_file_metadata_by_expr_result) + return this.equals((get_file_metadata_by_expr_result)that); return false; } - public boolean equals(get_next_notification_result that) { + public boolean equals(get_file_metadata_by_expr_result that) { if (that == null) return false; @@ -171498,7 +174579,7 @@ public int hashCode() { } @Override - public int compareTo(get_next_notification_result other) { + public int compareTo(get_file_metadata_by_expr_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -171532,7 +174613,7 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_next_notification_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); boolean first = true; sb.append("success:"); @@ -171570,15 +174651,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_next_notification_resultStandardSchemeFactory implements SchemeFactory { - public get_next_notification_resultStandardScheme getScheme() { - return new get_next_notification_resultStandardScheme(); + private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultStandardScheme getScheme() { + return new get_file_metadata_by_expr_resultStandardScheme(); } } - private static class get_next_notification_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -171590,7 +174671,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new NotificationEventResponse(); + struct.success = new GetFileMetadataByExprResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -171606,7 +174687,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_notificati struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -171621,16 +174702,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_notificat } - private static class get_next_notification_resultTupleSchemeFactory implements SchemeFactory { - public get_next_notification_resultTupleScheme getScheme() { - return new get_next_notification_resultTupleScheme(); + private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_by_expr_resultTupleScheme getScheme() { + return new get_file_metadata_by_expr_resultTupleScheme(); } } - private static class get_next_notification_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -171643,11 +174724,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_next_notificati } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notification_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new 
NotificationEventResponse(); + struct.success = new GetFileMetadataByExprResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -171656,20 +174737,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_next_notificatio } - public static class get_current_notificationEventId_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_args"); + public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); } + private GetFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -171684,6 +174767,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // REQ + return REQ; default: return null; } @@ -171722,37 +174807,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); } - public get_current_notificationEventId_args() { + public get_file_metadata_args() { + } + + public get_file_metadata_args( + GetFileMetadataRequest req) + { + this(); + this.req = req; } /** * Performs a deep copy on other. 
*/ - public get_current_notificationEventId_args(get_current_notificationEventId_args other) { + public get_file_metadata_args(get_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new GetFileMetadataRequest(other.req); + } } - public get_current_notificationEventId_args deepCopy() { - return new get_current_notificationEventId_args(this); + public get_file_metadata_args deepCopy() { + return new get_file_metadata_args(this); } @Override public void clear() { + this.req = null; + } + + public GetFileMetadataRequest getReq() { + return this.req; + } + + public void setReq(GetFileMetadataRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((GetFileMetadataRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case REQ: + return getReq(); + } throw new IllegalStateException(); } @@ -171764,6 +174898,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -171772,15 +174908,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_args) - return this.equals((get_current_notificationEventId_args)that); + if (that instanceof get_file_metadata_args) + return this.equals((get_file_metadata_args)that); return false; } - public boolean equals(get_current_notificationEventId_args that) { + public boolean equals(get_file_metadata_args that) { if (that == null) return false; + boolean this_present_req = true && 
this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + return true; } @@ -171788,17 +174933,32 @@ public boolean equals(get_current_notificationEventId_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + return list.hashCode(); } @Override - public int compareTo(get_current_notificationEventId_args other) { + public int compareTo(get_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -171816,9 +174976,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_args("); + StringBuilder sb = new StringBuilder("get_file_metadata_args("); boolean first = true; + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; sb.append(")"); return sb.toString(); } @@ -171826,6 +174993,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -171844,15 +175014,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_argsStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsStandardScheme getScheme() { - return new get_current_notificationEventId_argsStandardScheme(); + private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_argsStandardScheme getScheme() { + return new get_file_metadata_argsStandardScheme(); } } - private static class get_current_notificationEventId_argsStandardScheme extends StandardScheme { + private static class get_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -171862,6 +175032,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific break; } switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new GetFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -171871,49 +175050,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_current_notificationEventId_argsTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_argsTupleScheme getScheme() { - return new get_current_notificationEventId_argsTupleScheme(); + private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_argsTupleScheme getScheme() { + return new get_file_metadata_argsTupleScheme(); } } - private static class get_current_notificationEventId_argsTupleScheme extends TupleScheme { + private static class get_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } 
+ oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new GetFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } } } } - public static class get_current_notificationEventId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_current_notificationEventId_result"); + public static class get_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_current_notificationEventId_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_current_notificationEventId_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory()); } - private CurrentNotificationEventId success; // required + private GetFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods 
for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -171978,16 +175176,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CurrentNotificationEventId.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_current_notificationEventId_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); } - public get_current_notificationEventId_result() { + public get_file_metadata_result() { } - public get_current_notificationEventId_result( - CurrentNotificationEventId success) + public get_file_metadata_result( + GetFileMetadataResult success) { this(); this.success = success; @@ -171996,14 +175194,14 @@ public get_current_notificationEventId_result( /** * Performs a deep copy on other. 
*/ - public get_current_notificationEventId_result(get_current_notificationEventId_result other) { + public get_file_metadata_result(get_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new CurrentNotificationEventId(other.success); + this.success = new GetFileMetadataResult(other.success); } } - public get_current_notificationEventId_result deepCopy() { - return new get_current_notificationEventId_result(this); + public get_file_metadata_result deepCopy() { + return new get_file_metadata_result(this); } @Override @@ -172011,11 +175209,11 @@ public void clear() { this.success = null; } - public CurrentNotificationEventId getSuccess() { + public GetFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(CurrentNotificationEventId success) { + public void setSuccess(GetFileMetadataResult success) { this.success = success; } @@ -172040,7 +175238,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((CurrentNotificationEventId)value); + setSuccess((GetFileMetadataResult)value); } break; @@ -172073,12 +175271,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_current_notificationEventId_result) - return this.equals((get_current_notificationEventId_result)that); + if (that instanceof get_file_metadata_result) + return this.equals((get_file_metadata_result)that); return false; } - public boolean equals(get_current_notificationEventId_result that) { + public boolean equals(get_file_metadata_result that) { if (that == null) return false; @@ -172107,7 +175305,7 @@ public int hashCode() { } @Override - public int compareTo(get_current_notificationEventId_result other) { + public int compareTo(get_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -172141,7 +175339,7 @@ public 
void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_current_notificationEventId_result("); + StringBuilder sb = new StringBuilder("get_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -172179,15 +175377,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_current_notificationEventId_resultStandardSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultStandardScheme getScheme() { - return new get_current_notificationEventId_resultStandardScheme(); + private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public get_file_metadata_resultStandardScheme getScheme() { + return new get_file_metadata_resultStandardScheme(); } } - private static class get_current_notificationEventId_resultStandardScheme extends StandardScheme { + private static class get_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172199,7 +175397,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new CurrentNotificationEventId(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -172215,7 +175413,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_current_notific struct.validate(); } - public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -172230,16 +175428,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_current_notifi } - private static class get_current_notificationEventId_resultTupleSchemeFactory implements SchemeFactory { - public get_current_notificationEventId_resultTupleScheme getScheme() { - return new get_current_notificationEventId_resultTupleScheme(); + private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public get_file_metadata_resultTupleScheme getScheme() { + return new get_file_metadata_resultTupleScheme(); } } - private static class get_current_notificationEventId_resultTupleScheme extends TupleScheme { + private static class get_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -172252,11 +175450,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_current_notific } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notificationEventId_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new 
CurrentNotificationEventId(); + struct.success = new GetFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -172265,22 +175463,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_current_notifica } - public static class fire_listener_event_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_args"); + public static class put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); - private static final org.apache.thrift.protocol.TField RQST_FIELD_DESC = new org.apache.thrift.protocol.TField("rqst", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new fire_listener_event_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new fire_listener_event_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); } - private FireEventRequest rqst; // required + private PutFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - RQST((short)1, "rqst"); + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -172295,8 +175493,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // RQST - return RQST; + case 1: // REQ + return REQ; default: return null; } @@ -172340,70 +175538,70 @@ public String getFieldName() { public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.RQST, new org.apache.thrift.meta_data.FieldMetaData("rqst", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequest.class))); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); } - public fire_listener_event_args() { + public put_file_metadata_args() { } - public fire_listener_event_args( - FireEventRequest rqst) + public put_file_metadata_args( + PutFileMetadataRequest req) { this(); - this.rqst = rqst; + this.req = req; } /** * Performs a deep copy on other. 
*/ - public fire_listener_event_args(fire_listener_event_args other) { - if (other.isSetRqst()) { - this.rqst = new FireEventRequest(other.rqst); + public put_file_metadata_args(put_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new PutFileMetadataRequest(other.req); } } - public fire_listener_event_args deepCopy() { - return new fire_listener_event_args(this); + public put_file_metadata_args deepCopy() { + return new put_file_metadata_args(this); } @Override public void clear() { - this.rqst = null; + this.req = null; } - public FireEventRequest getRqst() { - return this.rqst; + public PutFileMetadataRequest getReq() { + return this.req; } - public void setRqst(FireEventRequest rqst) { - this.rqst = rqst; + public void setReq(PutFileMetadataRequest req) { + this.req = req; } - public void unsetRqst() { - this.rqst = null; + public void unsetReq() { + this.req = null; } - /** Returns true if field rqst is set (has been assigned a value) and false otherwise */ - public boolean isSetRqst() { - return this.rqst != null; + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; } - public void setRqstIsSet(boolean value) { + public void setReqIsSet(boolean value) { if (!value) { - this.rqst = null; + this.req = null; } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case RQST: + case REQ: if (value == null) { - unsetRqst(); + unsetReq(); } else { - setRqst((FireEventRequest)value); + setReq((PutFileMetadataRequest)value); } break; @@ -172412,8 +175610,8 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case RQST: - return getRqst(); + case REQ: + return getReq(); } throw new IllegalStateException(); @@ -172426,8 +175624,8 @@ public boolean isSet(_Fields field) { } switch (field) { - case RQST: - return isSetRqst(); + case REQ: + return isSetReq(); } throw new 
IllegalStateException(); } @@ -172436,21 +175634,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_args) - return this.equals((fire_listener_event_args)that); + if (that instanceof put_file_metadata_args) + return this.equals((put_file_metadata_args)that); return false; } - public boolean equals(fire_listener_event_args that) { + public boolean equals(put_file_metadata_args that) { if (that == null) return false; - boolean this_present_rqst = true && this.isSetRqst(); - boolean that_present_rqst = true && that.isSetRqst(); - if (this_present_rqst || that_present_rqst) { - if (!(this_present_rqst && that_present_rqst)) + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) return false; - if (!this.rqst.equals(that.rqst)) + if (!this.req.equals(that.req)) return false; } @@ -172461,28 +175659,28 @@ public boolean equals(fire_listener_event_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_rqst = true && (isSetRqst()); - list.add(present_rqst); - if (present_rqst) - list.add(rqst); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); return list.hashCode(); } @Override - public int compareTo(fire_listener_event_args other) { + public int compareTo(put_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetRqst()).compareTo(other.isSetRqst()); + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); if (lastComparison != 0) { return lastComparison; } - if (isSetRqst()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rqst, other.rqst); + if (isSetReq()) 
{ + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); if (lastComparison != 0) { return lastComparison; } @@ -172504,14 +175702,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("fire_listener_event_args("); + StringBuilder sb = new StringBuilder("put_file_metadata_args("); boolean first = true; - sb.append("rqst:"); - if (this.rqst == null) { + sb.append("req:"); + if (this.req == null) { sb.append("null"); } else { - sb.append(this.rqst); + sb.append(this.req); } first = false; sb.append(")"); @@ -172521,8 +175719,8 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (rqst != null) { - rqst.validate(); + if (req != null) { + req.validate(); } } @@ -172542,15 +175740,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class fire_listener_event_argsStandardSchemeFactory implements SchemeFactory { - public fire_listener_event_argsStandardScheme getScheme() { - return new fire_listener_event_argsStandardScheme(); + private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_argsStandardScheme getScheme() { + return new put_file_metadata_argsStandardScheme(); } } - private static class fire_listener_event_argsStandardScheme extends StandardScheme { + private static class put_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172560,11 +175758,11 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event break; } switch (schemeField.id) { - case 1: // RQST + case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.rqst = new FireEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -172578,13 +175776,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.rqst != null) { - oprot.writeFieldBegin(RQST_FIELD_DESC); - struct.rqst.write(oprot); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); oprot.writeFieldEnd(); } oprot.writeFieldStop(); @@ -172593,53 +175791,53 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even } - private static class fire_listener_event_argsTupleSchemeFactory implements SchemeFactory { - public fire_listener_event_argsTupleScheme getScheme() { - return new fire_listener_event_argsTupleScheme(); + private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_argsTupleScheme getScheme() { + return new put_file_metadata_argsTupleScheme(); } } - private static class fire_listener_event_argsTupleScheme extends TupleScheme { + private static class put_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws 
org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetRqst()) { + if (struct.isSetReq()) { optionals.set(0); } oprot.writeBitSet(optionals, 1); - if (struct.isSetRqst()) { - struct.rqst.write(oprot); + if (struct.isSetReq()) { + struct.req.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.rqst = new FireEventRequest(); - struct.rqst.read(iprot); - struct.setRqstIsSet(true); + struct.req = new PutFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); } } } } - public static class fire_listener_event_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fire_listener_event_result"); + public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new fire_listener_event_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
fire_listener_event_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); } - private FireEventResponse success; // required + private PutFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -172704,16 +175902,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventResponse.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fire_listener_event_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); } - public fire_listener_event_result() { + public put_file_metadata_result() { } - public fire_listener_event_result( - FireEventResponse success) + public put_file_metadata_result( + PutFileMetadataResult success) { this(); this.success = success; @@ -172722,14 +175920,14 @@ public fire_listener_event_result( /** * Performs a deep copy on other. 
*/ - public fire_listener_event_result(fire_listener_event_result other) { + public put_file_metadata_result(put_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new FireEventResponse(other.success); + this.success = new PutFileMetadataResult(other.success); } } - public fire_listener_event_result deepCopy() { - return new fire_listener_event_result(this); + public put_file_metadata_result deepCopy() { + return new put_file_metadata_result(this); } @Override @@ -172737,11 +175935,11 @@ public void clear() { this.success = null; } - public FireEventResponse getSuccess() { + public PutFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(FireEventResponse success) { + public void setSuccess(PutFileMetadataResult success) { this.success = success; } @@ -172766,7 +175964,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((FireEventResponse)value); + setSuccess((PutFileMetadataResult)value); } break; @@ -172799,12 +175997,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof fire_listener_event_result) - return this.equals((fire_listener_event_result)that); + if (that instanceof put_file_metadata_result) + return this.equals((put_file_metadata_result)that); return false; } - public boolean equals(fire_listener_event_result that) { + public boolean equals(put_file_metadata_result that) { if (that == null) return false; @@ -172833,7 +176031,7 @@ public int hashCode() { } @Override - public int compareTo(fire_listener_event_result other) { + public int compareTo(put_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -172867,7 +176065,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("fire_listener_event_result("); + StringBuilder sb = new StringBuilder("put_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -172905,15 +176103,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class fire_listener_event_resultStandardSchemeFactory implements SchemeFactory { - public fire_listener_event_resultStandardScheme getScheme() { - return new fire_listener_event_resultStandardScheme(); + private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public put_file_metadata_resultStandardScheme getScheme() { + return new put_file_metadata_resultStandardScheme(); } } - private static class fire_listener_event_resultStandardScheme extends StandardScheme { + private static class put_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -172925,7 +176123,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new FireEventResponse(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -172941,7 +176139,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, fire_listener_event struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -172956,16 +176154,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, fire_listener_even } - private static class fire_listener_event_resultTupleSchemeFactory implements SchemeFactory { - public fire_listener_event_resultTupleScheme getScheme() { - return new fire_listener_event_resultTupleScheme(); + private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public put_file_metadata_resultTupleScheme getScheme() { + return new put_file_metadata_resultTupleScheme(); } } - private static class fire_listener_event_resultTupleScheme extends TupleScheme { + private static class put_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -172978,11 +176176,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, fire_listener_event } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, fire_listener_event_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new FireEventResponse(); + struct.success = new PutFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -172991,20 +176189,22 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
fire_listener_event_ } - public static class flushCache_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_args"); + public static class clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new flushCache_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new flushCache_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); } + private ClearFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + REQ((short)1, "req"); private static final Map byName = new HashMap(); @@ -173019,6 +176219,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 1: // REQ + return REQ; default: return null; } @@ -173057,37 +176259,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); } - public flushCache_args() { + public clear_file_metadata_args() { + } + + public clear_file_metadata_args( + ClearFileMetadataRequest req) + { + this(); + this.req = req; } /** * Performs a deep copy on other. 
*/ - public flushCache_args(flushCache_args other) { + public clear_file_metadata_args(clear_file_metadata_args other) { + if (other.isSetReq()) { + this.req = new ClearFileMetadataRequest(other.req); + } } - public flushCache_args deepCopy() { - return new flushCache_args(this); + public clear_file_metadata_args deepCopy() { + return new clear_file_metadata_args(this); } @Override public void clear() { + this.req = null; + } + + public ClearFileMetadataRequest getReq() { + return this.req; + } + + public void setReq(ClearFileMetadataRequest req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((ClearFileMetadataRequest)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case REQ: + return getReq(); + } throw new IllegalStateException(); } @@ -173099,6 +176350,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case REQ: + return isSetReq(); } throw new IllegalStateException(); } @@ -173107,15 +176360,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof flushCache_args) - return this.equals((flushCache_args)that); + if (that instanceof clear_file_metadata_args) + return this.equals((clear_file_metadata_args)that); return false; } - public boolean equals(flushCache_args that) { + public boolean equals(clear_file_metadata_args that) { if (that == null) return false; + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if 
(!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + return true; } @@ -173123,17 +176385,32 @@ public boolean equals(flushCache_args that) { public int hashCode() { List list = new ArrayList(); + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + return list.hashCode(); } @Override - public int compareTo(flushCache_args other) { + public int compareTo(clear_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -173151,9 +176428,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("flushCache_args("); + StringBuilder sb = new StringBuilder("clear_file_metadata_args("); boolean first = true; + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; sb.append(")"); return sb.toString(); } @@ -173161,6 +176445,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (req != null) { + req.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173179,15 +176466,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class flushCache_argsStandardSchemeFactory implements SchemeFactory { - public flushCache_argsStandardScheme getScheme() { - return new flushCache_argsStandardScheme(); + private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsStandardScheme getScheme() { + return new clear_file_metadata_argsStandardScheme(); } } - private static class flushCache_argsStandardScheme extends StandardScheme { + private static class clear_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173197,6 +176484,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args str break; } switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new ClearFileMetadataRequest(); + struct.req.read(iprot); + 
struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -173206,51 +176502,72 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_args str struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class flushCache_argsTupleSchemeFactory implements SchemeFactory { - public flushCache_argsTupleScheme getScheme() { - return new flushCache_argsTupleScheme(); + private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_argsTupleScheme getScheme() { + return new clear_file_metadata_argsTupleScheme(); } } - private static class flushCache_argsTupleScheme extends TupleScheme { + private static class clear_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, flushCache_args struct) throws org.apache.thrift.TException { 
+ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new ClearFileMetadataRequest(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } } } } - public static class flushCache_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("flushCache_result"); + public static class clear_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new flushCache_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new flushCache_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); } + private ClearFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { -; + SUCCESS((short)0, "success"); private static final Map byName = new HashMap(); @@ -173265,6 +176582,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; default: return null; } @@ -173303,37 +176622,86 @@ public String getFieldName() { return _fieldName; } } + + // isset id assignments public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(flushCache_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); } - public flushCache_result() { + public clear_file_metadata_result() { + } + + public clear_file_metadata_result( + ClearFileMetadataResult success) + { + this(); + this.success = success; } /** * Performs a deep copy on other. 
*/ - public flushCache_result(flushCache_result other) { + public clear_file_metadata_result(clear_file_metadata_result other) { + if (other.isSetSuccess()) { + this.success = new ClearFileMetadataResult(other.success); + } } - public flushCache_result deepCopy() { - return new flushCache_result(this); + public clear_file_metadata_result deepCopy() { + return new clear_file_metadata_result(this); } @Override public void clear() { + this.success = null; + } + + public ClearFileMetadataResult getSuccess() { + return this.success; + } + + public void setSuccess(ClearFileMetadataResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((ClearFileMetadataResult)value); + } + break; + } } public Object getFieldValue(_Fields field) { switch (field) { + case SUCCESS: + return getSuccess(); + } throw new IllegalStateException(); } @@ -173345,6 +176713,8 @@ public boolean isSet(_Fields field) { } switch (field) { + case SUCCESS: + return isSetSuccess(); } throw new IllegalStateException(); } @@ -173353,15 +176723,24 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof flushCache_result) - return this.equals((flushCache_result)that); + if (that instanceof clear_file_metadata_result) + return this.equals((clear_file_metadata_result)that); return false; } - public boolean equals(flushCache_result that) { + public boolean equals(clear_file_metadata_result that) { if (that == null) return false; + boolean this_present_success = true && 
this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + return true; } @@ -173369,17 +176748,32 @@ public boolean equals(flushCache_result that) { public int hashCode() { List list = new ArrayList(); + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + return list.hashCode(); } @Override - public int compareTo(flushCache_result other) { + public int compareTo(clear_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -173397,9 +176791,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("flushCache_result("); + StringBuilder sb = new StringBuilder("clear_file_metadata_result("); boolean first = true; + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; sb.append(")"); return sb.toString(); } @@ -173407,6 +176808,9 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity + if (success != null) { + success.validate(); + } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -173425,15 +176829,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class flushCache_resultStandardSchemeFactory implements SchemeFactory { - public flushCache_resultStandardScheme getScheme() { - return new flushCache_resultStandardScheme(); + private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultStandardScheme getScheme() { + return new clear_file_metadata_resultStandardScheme(); } } - private static class flushCache_resultStandardScheme extends StandardScheme { + private static class clear_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173443,6 +176847,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new 
ClearFileMetadataResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -173452,49 +176865,68 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, flushCache_result s struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, flushCache_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class flushCache_resultTupleSchemeFactory implements SchemeFactory { - public flushCache_resultTupleScheme getScheme() { - return new flushCache_resultTupleScheme(); + private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public clear_file_metadata_resultTupleScheme getScheme() { + return new clear_file_metadata_resultTupleScheme(); } } - private static class flushCache_resultTupleScheme extends TupleScheme { + private static class clear_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } } @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, flushCache_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new ClearFileMetadataResult(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } } } } - public static class get_file_metadata_by_expr_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_args"); + public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); } - private GetFileMetadataByExprRequest req; // required + private CacheFileMetadataRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173559,16 +176991,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); } - public get_file_metadata_by_expr_args() { + public cache_file_metadata_args() { } - public get_file_metadata_by_expr_args( - GetFileMetadataByExprRequest req) + public cache_file_metadata_args( + CacheFileMetadataRequest req) { this(); this.req = req; @@ -173577,14 +177009,14 @@ public get_file_metadata_by_expr_args( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_by_expr_args(get_file_metadata_by_expr_args other) { + public cache_file_metadata_args(cache_file_metadata_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataByExprRequest(other.req); + this.req = new CacheFileMetadataRequest(other.req); } } - public get_file_metadata_by_expr_args deepCopy() { - return new get_file_metadata_by_expr_args(this); + public cache_file_metadata_args deepCopy() { + return new cache_file_metadata_args(this); } @Override @@ -173592,11 +177024,11 @@ public void clear() { this.req = null; } - public GetFileMetadataByExprRequest getReq() { + public CacheFileMetadataRequest getReq() { return this.req; } - public void setReq(GetFileMetadataByExprRequest req) { + public void setReq(CacheFileMetadataRequest req) { this.req = req; } @@ -173621,7 +177053,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataByExprRequest)value); + setReq((CacheFileMetadataRequest)value); } break; @@ -173654,12 +177086,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_args) - return this.equals((get_file_metadata_by_expr_args)that); + if (that instanceof cache_file_metadata_args) + return this.equals((cache_file_metadata_args)that); return false; } - public boolean equals(get_file_metadata_by_expr_args that) { + public boolean equals(cache_file_metadata_args that) { if (that == null) return false; @@ -173688,7 +177120,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_args other) { + public int compareTo(cache_file_metadata_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -173722,7 +177154,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_args("); + StringBuilder sb = new StringBuilder("cache_file_metadata_args("); boolean first = true; sb.append("req:"); @@ -173760,15 +177192,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_argsStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsStandardScheme getScheme() { - return new get_file_metadata_by_expr_argsStandardScheme(); + private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsStandardScheme getScheme() { + return new cache_file_metadata_argsStandardScheme(); } } - private static class get_file_metadata_by_expr_argsStandardScheme extends StandardScheme { + private static class cache_file_metadata_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -173780,7 +177212,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -173796,7 +177228,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -173811,16 +177243,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_argsTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_argsTupleScheme getScheme() { - return new get_file_metadata_by_expr_argsTupleScheme(); + private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_argsTupleScheme getScheme() { + return new cache_file_metadata_argsTupleScheme(); } } - private static class get_file_metadata_by_expr_argsTupleScheme extends TupleScheme { + private static class cache_file_metadata_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -173833,11 +177265,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new GetFileMetadataByExprRequest(); + struct.req = new CacheFileMetadataRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -173846,18 +177278,18 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_by_expr_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_by_expr_result"); + public static class cache_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_by_expr_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_by_expr_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); } - private GetFileMetadataByExprResult success; // required + private CacheFileMetadataResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -173922,16 +177354,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataByExprResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_by_expr_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); } - public get_file_metadata_by_expr_result() { + public cache_file_metadata_result() { } - public get_file_metadata_by_expr_result( - GetFileMetadataByExprResult success) + public cache_file_metadata_result( + CacheFileMetadataResult success) { this(); this.success = success; @@ -173940,14 +177372,14 @@ public get_file_metadata_by_expr_result( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_by_expr_result(get_file_metadata_by_expr_result other) { + public cache_file_metadata_result(cache_file_metadata_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataByExprResult(other.success); + this.success = new CacheFileMetadataResult(other.success); } } - public get_file_metadata_by_expr_result deepCopy() { - return new get_file_metadata_by_expr_result(this); + public cache_file_metadata_result deepCopy() { + return new cache_file_metadata_result(this); } @Override @@ -173955,11 +177387,11 @@ public void clear() { this.success = null; } - public GetFileMetadataByExprResult getSuccess() { + public CacheFileMetadataResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataByExprResult success) { + public void setSuccess(CacheFileMetadataResult success) { this.success = success; } @@ -173984,7 +177416,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataByExprResult)value); + setSuccess((CacheFileMetadataResult)value); } break; @@ -174017,12 +177449,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_by_expr_result) - return this.equals((get_file_metadata_by_expr_result)that); + if (that instanceof cache_file_metadata_result) + return this.equals((cache_file_metadata_result)that); return false; } - public boolean equals(get_file_metadata_by_expr_result that) { + public boolean equals(cache_file_metadata_result that) { if (that == null) return false; @@ -174051,7 +177483,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_by_expr_result other) { + public int compareTo(cache_file_metadata_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174085,7 +177517,7 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_by_expr_result("); + StringBuilder sb = new StringBuilder("cache_file_metadata_result("); boolean first = true; sb.append("success:"); @@ -174123,15 +177555,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_by_expr_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultStandardScheme getScheme() { - return new get_file_metadata_by_expr_resultStandardScheme(); + private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultStandardScheme getScheme() { + return new cache_file_metadata_resultStandardScheme(); } } - private static class get_file_metadata_by_expr_resultStandardScheme extends StandardScheme { + private static class cache_file_metadata_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174143,7 +177575,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174159,7 +177591,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_b struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174174,16 +177606,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_by_expr_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_by_expr_resultTupleScheme getScheme() { - return new get_file_metadata_by_expr_resultTupleScheme(); + private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory { + public cache_file_metadata_resultTupleScheme getScheme() { + return new cache_file_metadata_resultTupleScheme(); } } - private static class get_file_metadata_by_expr_resultTupleScheme extends TupleScheme { + private static class cache_file_metadata_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174196,11 +177628,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_b } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by_expr_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataByExprResult(); + struct.success = new 
CacheFileMetadataResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174209,18 +177641,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_by } - public static class get_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_args"); + public static class get_next_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_argsTupleSchemeFactory()); } - private GetFileMetadataRequest req; // required + private GetNextWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174285,16 +177717,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_args.class, metaDataMap); } - public get_file_metadata_args() { + public get_next_write_id_args() { } - public get_file_metadata_args( - GetFileMetadataRequest req) + public get_next_write_id_args( + GetNextWriteIdRequest req) { this(); this.req = req; @@ -174303,14 +177735,14 @@ public get_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_args(get_file_metadata_args other) { + public get_next_write_id_args(get_next_write_id_args other) { if (other.isSetReq()) { - this.req = new GetFileMetadataRequest(other.req); + this.req = new GetNextWriteIdRequest(other.req); } } - public get_file_metadata_args deepCopy() { - return new get_file_metadata_args(this); + public get_next_write_id_args deepCopy() { + return new get_next_write_id_args(this); } @Override @@ -174318,11 +177750,11 @@ public void clear() { this.req = null; } - public GetFileMetadataRequest getReq() { + public GetNextWriteIdRequest getReq() { return this.req; } - public void setReq(GetFileMetadataRequest req) { + public void setReq(GetNextWriteIdRequest req) { this.req = req; } @@ -174347,7 +177779,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((GetFileMetadataRequest)value); + setReq((GetNextWriteIdRequest)value); } break; @@ -174380,12 +177812,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_args) - return this.equals((get_file_metadata_args)that); + if (that instanceof get_next_write_id_args) + return this.equals((get_next_write_id_args)that); return false; } - public boolean equals(get_file_metadata_args that) { + public boolean equals(get_next_write_id_args that) { if (that == null) return false; @@ -174414,7 +177846,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_args other) { + public int compareTo(get_next_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174448,7 +177880,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_args("); + StringBuilder sb = new StringBuilder("get_next_write_id_args("); boolean first = true; sb.append("req:"); @@ -174486,15 +177918,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_argsStandardScheme getScheme() { - return new get_file_metadata_argsStandardScheme(); + private static class get_next_write_id_argsStandardSchemeFactory implements SchemeFactory { + public get_next_write_id_argsStandardScheme getScheme() { + return new get_next_write_id_argsStandardScheme(); } } - private static class get_file_metadata_argsStandardScheme extends StandardScheme { + private static class get_next_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174506,7 +177938,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new GetFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -174522,7 +177954,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_next_write_id_args struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174537,16 +177969,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_argsTupleScheme getScheme() { - return new get_file_metadata_argsTupleScheme(); + private static class get_next_write_id_argsTupleSchemeFactory implements SchemeFactory { + public get_next_write_id_argsTupleScheme getScheme() { + return new get_next_write_id_argsTupleScheme(); } } - private static class get_file_metadata_argsTupleScheme extends TupleScheme { + private static class get_next_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -174559,11 +177991,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new GetFileMetadataRequest(); + struct.req = new GetNextWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -174572,18 +178004,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_ar } - public static class get_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_file_metadata_result"); + public static class get_next_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_next_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_next_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_next_write_id_resultTupleSchemeFactory()); } - private GetFileMetadataResult success; // required + private GetNextWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -174648,16 +178080,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetNextWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_next_write_id_result.class, metaDataMap); } - public get_file_metadata_result() { + public get_next_write_id_result() { } - public get_file_metadata_result( - GetFileMetadataResult success) + public get_next_write_id_result( + GetNextWriteIdResult success) { this(); this.success = success; @@ -174666,14 +178098,14 @@ public get_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public get_file_metadata_result(get_file_metadata_result other) { + public get_next_write_id_result(get_next_write_id_result other) { if (other.isSetSuccess()) { - this.success = new GetFileMetadataResult(other.success); + this.success = new GetNextWriteIdResult(other.success); } } - public get_file_metadata_result deepCopy() { - return new get_file_metadata_result(this); + public get_next_write_id_result deepCopy() { + return new get_next_write_id_result(this); } @Override @@ -174681,11 +178113,11 @@ public void clear() { this.success = null; } - public GetFileMetadataResult getSuccess() { + public GetNextWriteIdResult getSuccess() { return this.success; } - public void setSuccess(GetFileMetadataResult success) { + public void setSuccess(GetNextWriteIdResult success) { this.success = success; } @@ -174710,7 +178142,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((GetFileMetadataResult)value); + setSuccess((GetNextWriteIdResult)value); } break; @@ -174743,12 +178175,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_file_metadata_result) - return this.equals((get_file_metadata_result)that); + if (that instanceof get_next_write_id_result) + return this.equals((get_next_write_id_result)that); return false; } - public boolean equals(get_file_metadata_result that) { + public boolean equals(get_next_write_id_result that) { if (that == null) return false; @@ -174777,7 +178209,7 @@ public int hashCode() { } @Override - public int compareTo(get_file_metadata_result other) { + public int compareTo(get_next_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -174811,7 +178243,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_file_metadata_result("); + StringBuilder sb = new StringBuilder("get_next_write_id_result("); boolean first = true; sb.append("success:"); @@ -174849,15 +178281,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public get_file_metadata_resultStandardScheme getScheme() { - return new get_file_metadata_resultStandardScheme(); + private static class get_next_write_id_resultStandardSchemeFactory implements SchemeFactory { + public get_next_write_id_resultStandardScheme getScheme() { + return new get_next_write_id_resultStandardScheme(); } } - private static class get_file_metadata_resultStandardScheme extends StandardScheme { + private static class get_next_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_next_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -174869,7 +178301,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new GetFileMetadataResult(); + struct.success = new GetNextWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -174885,7 +178317,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_next_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -174900,16 +178332,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_file_metadata_ } - private static class get_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public get_file_metadata_resultTupleScheme getScheme() { - return new get_file_metadata_resultTupleScheme(); + private static class get_next_write_id_resultTupleSchemeFactory implements SchemeFactory { + public get_next_write_id_resultTupleScheme getScheme() { + return new get_next_write_id_resultTupleScheme(); } } - private static class get_file_metadata_resultTupleScheme extends TupleScheme { + private static class get_next_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -174922,11 +178354,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_next_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new GetFileMetadataResult(); + struct.success = new GetNextWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -174935,18 +178367,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_file_metadata_re } - public static class 
put_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_args"); + public static class finalize_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new finalize_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new finalize_write_id_argsTupleSchemeFactory()); } - private PutFileMetadataRequest req; // required + private FinalizeWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175011,16 +178443,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_args.class, metaDataMap); } - public put_file_metadata_args() { + public finalize_write_id_args() { } - public put_file_metadata_args( - PutFileMetadataRequest req) + public finalize_write_id_args( + FinalizeWriteIdRequest req) { this(); this.req = req; @@ -175029,14 +178461,14 @@ public put_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_args(put_file_metadata_args other) { + public finalize_write_id_args(finalize_write_id_args other) { if (other.isSetReq()) { - this.req = new PutFileMetadataRequest(other.req); + this.req = new FinalizeWriteIdRequest(other.req); } } - public put_file_metadata_args deepCopy() { - return new put_file_metadata_args(this); + public finalize_write_id_args deepCopy() { + return new finalize_write_id_args(this); } @Override @@ -175044,11 +178476,11 @@ public void clear() { this.req = null; } - public PutFileMetadataRequest getReq() { + public FinalizeWriteIdRequest getReq() { return this.req; } - public void setReq(PutFileMetadataRequest req) { + public void setReq(FinalizeWriteIdRequest req) { this.req = req; } @@ -175073,7 +178505,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((PutFileMetadataRequest)value); + setReq((FinalizeWriteIdRequest)value); } break; @@ -175106,12 +178538,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_args) - return this.equals((put_file_metadata_args)that); + if (that instanceof finalize_write_id_args) + return this.equals((finalize_write_id_args)that); return false; } - public boolean equals(put_file_metadata_args that) { + public boolean equals(finalize_write_id_args that) { if (that == null) return false; @@ -175140,7 +178572,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_args other) { + public int compareTo(finalize_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175174,7 +178606,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_args("); + StringBuilder sb = new StringBuilder("finalize_write_id_args("); boolean first = true; sb.append("req:"); @@ -175212,15 +178644,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_argsStandardScheme getScheme() { - return new put_file_metadata_argsStandardScheme(); + private static class finalize_write_id_argsStandardSchemeFactory implements SchemeFactory { + public finalize_write_id_argsStandardScheme getScheme() { + return new finalize_write_id_argsStandardScheme(); } } - private static class put_file_metadata_argsStandardScheme extends StandardScheme { + private static class finalize_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175232,7 +178664,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new PutFileMetadataRequest(); + struct.req = new FinalizeWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -175248,7 +178680,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, finalize_write_id_args struct) throws 
org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175263,16 +178695,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_argsTupleScheme getScheme() { - return new put_file_metadata_argsTupleScheme(); + private static class finalize_write_id_argsTupleSchemeFactory implements SchemeFactory { + public finalize_write_id_argsTupleScheme getScheme() { + return new finalize_write_id_argsTupleScheme(); } } - private static class put_file_metadata_argsTupleScheme extends TupleScheme { + private static class finalize_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -175285,11 +178717,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_a } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new PutFileMetadataRequest(); + struct.req = new FinalizeWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -175298,18 +178730,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_ar } - public static class put_file_metadata_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, 
Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("put_file_metadata_result"); + public static class finalize_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("finalize_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new put_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new put_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new finalize_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new finalize_write_id_resultTupleSchemeFactory()); } - private PutFileMetadataResult success; // required + private FinalizeWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175374,16 +178806,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PutFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FinalizeWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(put_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(finalize_write_id_result.class, metaDataMap); } - public put_file_metadata_result() { + public finalize_write_id_result() { } - public put_file_metadata_result( - PutFileMetadataResult success) + public finalize_write_id_result( + FinalizeWriteIdResult success) { this(); this.success = success; @@ -175392,14 +178824,14 @@ public put_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public put_file_metadata_result(put_file_metadata_result other) { + public finalize_write_id_result(finalize_write_id_result other) { if (other.isSetSuccess()) { - this.success = new PutFileMetadataResult(other.success); + this.success = new FinalizeWriteIdResult(other.success); } } - public put_file_metadata_result deepCopy() { - return new put_file_metadata_result(this); + public finalize_write_id_result deepCopy() { + return new finalize_write_id_result(this); } @Override @@ -175407,11 +178839,11 @@ public void clear() { this.success = null; } - public PutFileMetadataResult getSuccess() { + public FinalizeWriteIdResult getSuccess() { return this.success; } - public void setSuccess(PutFileMetadataResult success) { + public void setSuccess(FinalizeWriteIdResult success) { this.success = success; } @@ -175436,7 +178868,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((PutFileMetadataResult)value); + setSuccess((FinalizeWriteIdResult)value); } break; @@ -175469,12 +178901,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof put_file_metadata_result) - return this.equals((put_file_metadata_result)that); + if (that instanceof finalize_write_id_result) + return this.equals((finalize_write_id_result)that); return false; } - public boolean equals(put_file_metadata_result that) { + public boolean equals(finalize_write_id_result that) { if (that == null) return false; @@ -175503,7 +178935,7 @@ public int hashCode() { } @Override - public int compareTo(put_file_metadata_result other) { + public int compareTo(finalize_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175537,7 +178969,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("put_file_metadata_result("); + StringBuilder sb = new StringBuilder("finalize_write_id_result("); boolean first = true; sb.append("success:"); @@ -175575,15 +179007,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class put_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public put_file_metadata_resultStandardScheme getScheme() { - return new put_file_metadata_resultStandardScheme(); + private static class finalize_write_id_resultStandardSchemeFactory implements SchemeFactory { + public finalize_write_id_resultStandardScheme getScheme() { + return new finalize_write_id_resultStandardScheme(); } } - private static class put_file_metadata_resultStandardScheme extends StandardScheme { + private static class finalize_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, finalize_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175595,7 +179027,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new PutFileMetadataResult(); + struct.success = new FinalizeWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -175611,7 +179043,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, put_file_metadata_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, 
finalize_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175626,16 +179058,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, put_file_metadata_ } - private static class put_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public put_file_metadata_resultTupleScheme getScheme() { - return new put_file_metadata_resultTupleScheme(); + private static class finalize_write_id_resultTupleSchemeFactory implements SchemeFactory { + public finalize_write_id_resultTupleScheme getScheme() { + return new finalize_write_id_resultTupleScheme(); } } - private static class put_file_metadata_resultTupleScheme extends TupleScheme { + private static class finalize_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -175648,11 +179080,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, finalize_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new PutFileMetadataResult(); + struct.success = new FinalizeWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -175661,18 +179093,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, put_file_metadata_re } - public static class 
clear_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_args"); + public static class heartbeat_write_id_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new heartbeat_write_id_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_write_id_argsTupleSchemeFactory()); } - private ClearFileMetadataRequest req; // required + private HeartbeatWriteIdRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -175737,16 +179169,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_args.class, metaDataMap); } - public clear_file_metadata_args() { + public heartbeat_write_id_args() { } - public clear_file_metadata_args( - ClearFileMetadataRequest req) + public heartbeat_write_id_args( + HeartbeatWriteIdRequest req) { this(); this.req = req; @@ -175755,14 +179187,14 @@ public clear_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_args(clear_file_metadata_args other) { + public heartbeat_write_id_args(heartbeat_write_id_args other) { if (other.isSetReq()) { - this.req = new ClearFileMetadataRequest(other.req); + this.req = new HeartbeatWriteIdRequest(other.req); } } - public clear_file_metadata_args deepCopy() { - return new clear_file_metadata_args(this); + public heartbeat_write_id_args deepCopy() { + return new heartbeat_write_id_args(this); } @Override @@ -175770,11 +179202,11 @@ public void clear() { this.req = null; } - public ClearFileMetadataRequest getReq() { + public HeartbeatWriteIdRequest getReq() { return this.req; } - public void setReq(ClearFileMetadataRequest req) { + public void setReq(HeartbeatWriteIdRequest req) { this.req = req; } @@ -175799,7 +179231,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((ClearFileMetadataRequest)value); + setReq((HeartbeatWriteIdRequest)value); } break; @@ -175832,12 +179264,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_args) - return this.equals((clear_file_metadata_args)that); + if (that instanceof heartbeat_write_id_args) + return this.equals((heartbeat_write_id_args)that); return false; } - public boolean equals(clear_file_metadata_args that) { + public boolean equals(heartbeat_write_id_args that) { if (that == null) return false; @@ -175866,7 +179298,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_args other) { + public int compareTo(heartbeat_write_id_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -175900,7 +179332,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_args("); + StringBuilder sb = new StringBuilder("heartbeat_write_id_args("); boolean first = true; sb.append("req:"); @@ -175938,15 +179370,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsStandardScheme getScheme() { - return new clear_file_metadata_argsStandardScheme(); + private static class heartbeat_write_id_argsStandardSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsStandardScheme getScheme() { + return new heartbeat_write_id_argsStandardScheme(); } } - private static class clear_file_metadata_argsStandardScheme extends StandardScheme { + private static class heartbeat_write_id_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -175958,7 +179390,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -175974,7 +179406,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_args 
struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -175989,16 +179421,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_argsTupleScheme getScheme() { - return new clear_file_metadata_argsTupleScheme(); + private static class heartbeat_write_id_argsTupleSchemeFactory implements SchemeFactory { + public heartbeat_write_id_argsTupleScheme getScheme() { + return new heartbeat_write_id_argsTupleScheme(); } } - private static class clear_file_metadata_argsTupleScheme extends TupleScheme { + private static class heartbeat_write_id_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -176011,11 +179443,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new ClearFileMetadataRequest(); + struct.req = new HeartbeatWriteIdRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -176024,18 +179456,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class clear_file_metadata_result implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_file_metadata_result"); + public static class heartbeat_write_id_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("heartbeat_write_id_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new clear_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new clear_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new heartbeat_write_id_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new heartbeat_write_id_resultTupleSchemeFactory()); } - private ClearFileMetadataResult success; // required + private HeartbeatWriteIdResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176100,16 +179532,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClearFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatWriteIdResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(heartbeat_write_id_result.class, metaDataMap); } - public clear_file_metadata_result() { + public heartbeat_write_id_result() { } - public clear_file_metadata_result( - ClearFileMetadataResult success) + public heartbeat_write_id_result( + HeartbeatWriteIdResult success) { this(); this.success = success; @@ -176118,14 +179550,14 @@ public clear_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public clear_file_metadata_result(clear_file_metadata_result other) { + public heartbeat_write_id_result(heartbeat_write_id_result other) { if (other.isSetSuccess()) { - this.success = new ClearFileMetadataResult(other.success); + this.success = new HeartbeatWriteIdResult(other.success); } } - public clear_file_metadata_result deepCopy() { - return new clear_file_metadata_result(this); + public heartbeat_write_id_result deepCopy() { + return new heartbeat_write_id_result(this); } @Override @@ -176133,11 +179565,11 @@ public void clear() { this.success = null; } - public ClearFileMetadataResult getSuccess() { + public HeartbeatWriteIdResult getSuccess() { return this.success; } - public void setSuccess(ClearFileMetadataResult success) { + public void setSuccess(HeartbeatWriteIdResult success) { this.success = success; } @@ -176162,7 +179594,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((ClearFileMetadataResult)value); + setSuccess((HeartbeatWriteIdResult)value); } break; @@ -176195,12 +179627,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof clear_file_metadata_result) - return this.equals((clear_file_metadata_result)that); + if (that instanceof heartbeat_write_id_result) + return this.equals((heartbeat_write_id_result)that); return false; } - public boolean equals(clear_file_metadata_result that) { + public boolean equals(heartbeat_write_id_result that) { if (that == null) return false; @@ -176229,7 +179661,7 @@ public int hashCode() { } @Override - public int compareTo(clear_file_metadata_result other) { + public int compareTo(heartbeat_write_id_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176263,7 +179695,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("clear_file_metadata_result("); + StringBuilder sb = new StringBuilder("heartbeat_write_id_result("); boolean first = true; sb.append("success:"); @@ -176301,15 +179733,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class clear_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultStandardScheme getScheme() { - return new clear_file_metadata_resultStandardScheme(); + private static class heartbeat_write_id_resultStandardSchemeFactory implements SchemeFactory { + public heartbeat_write_id_resultStandardScheme getScheme() { + return new heartbeat_write_id_resultStandardScheme(); } } - private static class clear_file_metadata_resultStandardScheme extends StandardScheme { + private static class heartbeat_write_id_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176321,7 +179753,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ClearFileMetadataResult(); + struct.success = new HeartbeatWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -176337,7 +179769,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, clear_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -176352,16 +179784,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, clear_file_metadat } - private static class clear_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public clear_file_metadata_resultTupleScheme getScheme() { - return new clear_file_metadata_resultTupleScheme(); + private static class heartbeat_write_id_resultTupleSchemeFactory implements SchemeFactory { + public heartbeat_write_id_resultTupleScheme getScheme() { + return new heartbeat_write_id_resultTupleScheme(); } } - private static class clear_file_metadata_resultTupleScheme extends TupleScheme { + private static class heartbeat_write_id_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -176374,11 +179806,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, heartbeat_write_id_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new ClearFileMetadataResult(); + struct.success = new HeartbeatWriteIdResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -176387,18 +179819,18 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, clear_file_metadata_ } - public static class cache_file_metadata_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_args"); + public static class get_valid_write_ids_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_valid_write_ids_args"); private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_valid_write_ids_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_valid_write_ids_argsTupleSchemeFactory()); } - private CacheFileMetadataRequest req; // required + private GetValidWriteIdsRequest req; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176463,16 +179895,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetValidWriteIdsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_valid_write_ids_args.class, metaDataMap); } - public cache_file_metadata_args() { + public get_valid_write_ids_args() { } - public cache_file_metadata_args( - CacheFileMetadataRequest req) + public get_valid_write_ids_args( + GetValidWriteIdsRequest req) { this(); this.req = req; @@ -176481,14 +179913,14 @@ public cache_file_metadata_args( /** * Performs a deep copy on other. 
*/ - public cache_file_metadata_args(cache_file_metadata_args other) { + public get_valid_write_ids_args(get_valid_write_ids_args other) { if (other.isSetReq()) { - this.req = new CacheFileMetadataRequest(other.req); + this.req = new GetValidWriteIdsRequest(other.req); } } - public cache_file_metadata_args deepCopy() { - return new cache_file_metadata_args(this); + public get_valid_write_ids_args deepCopy() { + return new get_valid_write_ids_args(this); } @Override @@ -176496,11 +179928,11 @@ public void clear() { this.req = null; } - public CacheFileMetadataRequest getReq() { + public GetValidWriteIdsRequest getReq() { return this.req; } - public void setReq(CacheFileMetadataRequest req) { + public void setReq(GetValidWriteIdsRequest req) { this.req = req; } @@ -176525,7 +179957,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetReq(); } else { - setReq((CacheFileMetadataRequest)value); + setReq((GetValidWriteIdsRequest)value); } break; @@ -176558,12 +179990,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof cache_file_metadata_args) - return this.equals((cache_file_metadata_args)that); + if (that instanceof get_valid_write_ids_args) + return this.equals((get_valid_write_ids_args)that); return false; } - public boolean equals(cache_file_metadata_args that) { + public boolean equals(get_valid_write_ids_args that) { if (that == null) return false; @@ -176592,7 +180024,7 @@ public int hashCode() { } @Override - public int compareTo(cache_file_metadata_args other) { + public int compareTo(get_valid_write_ids_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176626,7 +180058,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("cache_file_metadata_args("); + StringBuilder sb = new StringBuilder("get_valid_write_ids_args("); boolean first = true; sb.append("req:"); @@ -176664,15 +180096,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class cache_file_metadata_argsStandardSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsStandardScheme getScheme() { - return new cache_file_metadata_argsStandardScheme(); + private static class get_valid_write_ids_argsStandardSchemeFactory implements SchemeFactory { + public get_valid_write_ids_argsStandardScheme getScheme() { + return new get_valid_write_ids_argsStandardScheme(); } } - private static class cache_file_metadata_argsStandardScheme extends StandardScheme { + private static class get_valid_write_ids_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_valid_write_ids_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -176684,7 +180116,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata switch (schemeField.id) { case 1: // REQ if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new GetValidWriteIdsRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } else { @@ -176700,7 +180132,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, 
get_valid_write_ids_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -176715,16 +180147,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat } - private static class cache_file_metadata_argsTupleSchemeFactory implements SchemeFactory { - public cache_file_metadata_argsTupleScheme getScheme() { - return new cache_file_metadata_argsTupleScheme(); + private static class get_valid_write_ids_argsTupleSchemeFactory implements SchemeFactory { + public get_valid_write_ids_argsTupleScheme getScheme() { + return new get_valid_write_ids_argsTupleScheme(); } } - private static class cache_file_metadata_argsTupleScheme extends TupleScheme { + private static class get_valid_write_ids_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetReq()) { @@ -176737,11 +180169,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.req = new CacheFileMetadataRequest(); + struct.req = new GetValidWriteIdsRequest(); struct.req.read(iprot); struct.setReqIsSet(true); } @@ -176750,18 +180182,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_ } - public static class cache_file_metadata_result 
implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("cache_file_metadata_result"); + public static class get_valid_write_ids_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_valid_write_ids_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new cache_file_metadata_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new cache_file_metadata_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_valid_write_ids_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_valid_write_ids_resultTupleSchemeFactory()); } - private CacheFileMetadataResult success; // required + private GetValidWriteIdsResult success; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -176826,16 +180258,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, CacheFileMetadataResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, GetValidWriteIdsResult.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(cache_file_metadata_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_valid_write_ids_result.class, metaDataMap); } - public cache_file_metadata_result() { + public get_valid_write_ids_result() { } - public cache_file_metadata_result( - CacheFileMetadataResult success) + public get_valid_write_ids_result( + GetValidWriteIdsResult success) { this(); this.success = success; @@ -176844,14 +180276,14 @@ public cache_file_metadata_result( /** * Performs a deep copy on other. 
*/ - public cache_file_metadata_result(cache_file_metadata_result other) { + public get_valid_write_ids_result(get_valid_write_ids_result other) { if (other.isSetSuccess()) { - this.success = new CacheFileMetadataResult(other.success); + this.success = new GetValidWriteIdsResult(other.success); } } - public cache_file_metadata_result deepCopy() { - return new cache_file_metadata_result(this); + public get_valid_write_ids_result deepCopy() { + return new get_valid_write_ids_result(this); } @Override @@ -176859,11 +180291,11 @@ public void clear() { this.success = null; } - public CacheFileMetadataResult getSuccess() { + public GetValidWriteIdsResult getSuccess() { return this.success; } - public void setSuccess(CacheFileMetadataResult success) { + public void setSuccess(GetValidWriteIdsResult success) { this.success = success; } @@ -176888,7 +180320,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((CacheFileMetadataResult)value); + setSuccess((GetValidWriteIdsResult)value); } break; @@ -176921,12 +180353,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof cache_file_metadata_result) - return this.equals((cache_file_metadata_result)that); + if (that instanceof get_valid_write_ids_result) + return this.equals((get_valid_write_ids_result)that); return false; } - public boolean equals(cache_file_metadata_result that) { + public boolean equals(get_valid_write_ids_result that) { if (that == null) return false; @@ -176955,7 +180387,7 @@ public int hashCode() { } @Override - public int compareTo(cache_file_metadata_result other) { + public int compareTo(get_valid_write_ids_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -176989,7 +180421,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("cache_file_metadata_result("); + StringBuilder sb = new StringBuilder("get_valid_write_ids_result("); boolean first = true; sb.append("success:"); @@ -177027,15 +180459,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class cache_file_metadata_resultStandardSchemeFactory implements SchemeFactory { - public cache_file_metadata_resultStandardScheme getScheme() { - return new cache_file_metadata_resultStandardScheme(); + private static class get_valid_write_ids_resultStandardSchemeFactory implements SchemeFactory { + public get_valid_write_ids_resultStandardScheme getScheme() { + return new get_valid_write_ids_resultStandardScheme(); } } - private static class cache_file_metadata_resultStandardScheme extends StandardScheme { + private static class get_valid_write_ids_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_valid_write_ids_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -177047,7 +180479,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new CacheFileMetadataResult(); + struct.success = new GetValidWriteIdsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -177063,7 +180495,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, cache_file_metadata struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_valid_write_ids_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -177078,16 +180510,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, cache_file_metadat } - private static class cache_file_metadata_resultTupleSchemeFactory implements SchemeFactory { - public cache_file_metadata_resultTupleScheme getScheme() { - return new cache_file_metadata_resultTupleScheme(); + private static class get_valid_write_ids_resultTupleSchemeFactory implements SchemeFactory { + public get_valid_write_ids_resultTupleScheme getScheme() { + return new get_valid_write_ids_resultTupleScheme(); } } - private static class cache_file_metadata_resultTupleScheme extends TupleScheme { + private static class get_valid_write_ids_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -177100,11 +180532,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, cache_file_metadata_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_valid_write_ids_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.success = new CacheFileMetadataResult(); + struct.success = new GetValidWriteIdsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } diff --git 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index 8de8896bff4d..2503d186a492 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -84,4 +84,6 @@ public class hive_metastoreConstants { public static final String TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties"; + public static final String TABLE_IS_MM = "hivecommit"; + } diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 24b3ba1f6a77..93e0bc861125 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -1171,6 +1171,26 @@ public function clear_file_metadata(\metastore\ClearFileMetadataRequest $req); * @return \metastore\CacheFileMetadataResult */ public function cache_file_metadata(\metastore\CacheFileMetadataRequest $req); + /** + * @param \metastore\GetNextWriteIdRequest $req + * @return \metastore\GetNextWriteIdResult + */ + public function get_next_write_id(\metastore\GetNextWriteIdRequest $req); + /** + * @param \metastore\FinalizeWriteIdRequest $req + * @return \metastore\FinalizeWriteIdResult + */ + public function finalize_write_id(\metastore\FinalizeWriteIdRequest $req); + /** + * @param \metastore\HeartbeatWriteIdRequest $req + * @return \metastore\HeartbeatWriteIdResult + */ + public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req); + /** + * @param \metastore\GetValidWriteIdsRequest $req + * @return \metastore\GetValidWriteIdsResult + */ + public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req); } class 
ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf { @@ -9705,6 +9725,210 @@ public function recv_cache_file_metadata() throw new \Exception("cache_file_metadata failed: unknown result"); } + public function get_next_write_id(\metastore\GetNextWriteIdRequest $req) + { + $this->send_get_next_write_id($req); + return $this->recv_get_next_write_id(); + } + + public function send_get_next_write_id(\metastore\GetNextWriteIdRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_get_next_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_next_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_next_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_next_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_next_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_next_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("get_next_write_id failed: unknown result"); + } + + public 
function finalize_write_id(\metastore\FinalizeWriteIdRequest $req) + { + $this->send_finalize_write_id($req); + return $this->recv_finalize_write_id(); + } + + public function send_finalize_write_id(\metastore\FinalizeWriteIdRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_finalize_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'finalize_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('finalize_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_finalize_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_finalize_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_finalize_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("finalize_write_id failed: unknown result"); + } + + public function heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req) + { + $this->send_heartbeat_write_id($req); + return $this->recv_heartbeat_write_id(); + } + + public function send_heartbeat_write_id(\metastore\HeartbeatWriteIdRequest $req) + 
{ + $args = new \metastore\ThriftHiveMetastore_heartbeat_write_id_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'heartbeat_write_id', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('heartbeat_write_id', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_heartbeat_write_id() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_heartbeat_write_id_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_heartbeat_write_id_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("heartbeat_write_id failed: unknown result"); + } + + public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req) + { + $this->send_get_valid_write_ids($req); + return $this->recv_get_valid_write_ids(); + } + + public function send_get_valid_write_ids(\metastore\GetValidWriteIdsRequest $req) + { + $args = new \metastore\ThriftHiveMetastore_get_valid_write_ids_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if 
($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_valid_write_ids', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_valid_write_ids', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_valid_write_ids() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_valid_write_ids_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_valid_write_ids_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("get_valid_write_ids failed: unknown result"); + } + } // HELPER FUNCTIONS AND STRUCTURES @@ -10856,14 +11080,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size569 = 0; - $_etype572 = 0; - $xfer += $input->readListBegin($_etype572, $_size569); - for ($_i573 = 0; $_i573 < $_size569; ++$_i573) + $_size576 = 0; + $_etype579 = 0; + $xfer += $input->readListBegin($_etype579, $_size576); + for ($_i580 = 0; $_i580 < $_size576; ++$_i580) { - $elem574 = null; - $xfer += $input->readString($elem574); - $this->success []= $elem574; + $elem581 = null; + $xfer += $input->readString($elem581); + $this->success []= $elem581; } $xfer += $input->readListEnd(); } else { @@ -10899,9 +11123,9 @@ public function 
write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter575) + foreach ($this->success as $iter582) { - $xfer += $output->writeString($iter575); + $xfer += $output->writeString($iter582); } } $output->writeListEnd(); @@ -11032,14 +11256,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size576 = 0; - $_etype579 = 0; - $xfer += $input->readListBegin($_etype579, $_size576); - for ($_i580 = 0; $_i580 < $_size576; ++$_i580) + $_size583 = 0; + $_etype586 = 0; + $xfer += $input->readListBegin($_etype586, $_size583); + for ($_i587 = 0; $_i587 < $_size583; ++$_i587) { - $elem581 = null; - $xfer += $input->readString($elem581); - $this->success []= $elem581; + $elem588 = null; + $xfer += $input->readString($elem588); + $this->success []= $elem588; } $xfer += $input->readListEnd(); } else { @@ -11075,9 +11299,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter582) + foreach ($this->success as $iter589) { - $xfer += $output->writeString($iter582); + $xfer += $output->writeString($iter589); } } $output->writeListEnd(); @@ -12078,18 +12302,18 @@ public function read($input) case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size583 = 0; - $_ktype584 = 0; - $_vtype585 = 0; - $xfer += $input->readMapBegin($_ktype584, $_vtype585, $_size583); - for ($_i587 = 0; $_i587 < $_size583; ++$_i587) + $_size590 = 0; + $_ktype591 = 0; + $_vtype592 = 0; + $xfer += $input->readMapBegin($_ktype591, $_vtype592, $_size590); + for ($_i594 = 0; $_i594 < $_size590; ++$_i594) { - $key588 = ''; - $val589 = new \metastore\Type(); - $xfer += $input->readString($key588); - $val589 = new \metastore\Type(); - $xfer += $val589->read($input); - $this->success[$key588] = $val589; + $key595 = ''; + $val596 = new \metastore\Type(); + $xfer += $input->readString($key595); + $val596 = new 
\metastore\Type(); + $xfer += $val596->read($input); + $this->success[$key595] = $val596; } $xfer += $input->readMapEnd(); } else { @@ -12125,10 +12349,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter590 => $viter591) + foreach ($this->success as $kiter597 => $viter598) { - $xfer += $output->writeString($kiter590); - $xfer += $viter591->write($output); + $xfer += $output->writeString($kiter597); + $xfer += $viter598->write($output); } } $output->writeMapEnd(); @@ -12332,15 +12556,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size592 = 0; - $_etype595 = 0; - $xfer += $input->readListBegin($_etype595, $_size592); - for ($_i596 = 0; $_i596 < $_size592; ++$_i596) + $_size599 = 0; + $_etype602 = 0; + $xfer += $input->readListBegin($_etype602, $_size599); + for ($_i603 = 0; $_i603 < $_size599; ++$_i603) { - $elem597 = null; - $elem597 = new \metastore\FieldSchema(); - $xfer += $elem597->read($input); - $this->success []= $elem597; + $elem604 = null; + $elem604 = new \metastore\FieldSchema(); + $xfer += $elem604->read($input); + $this->success []= $elem604; } $xfer += $input->readListEnd(); } else { @@ -12392,9 +12616,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter598) + foreach ($this->success as $iter605) { - $xfer += $iter598->write($output); + $xfer += $iter605->write($output); } } $output->writeListEnd(); @@ -12636,15 +12860,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size599 = 0; - $_etype602 = 0; - $xfer += $input->readListBegin($_etype602, $_size599); - for ($_i603 = 0; $_i603 < $_size599; ++$_i603) + $_size606 = 0; + $_etype609 = 0; + $xfer += $input->readListBegin($_etype609, $_size606); + for ($_i610 = 0; $_i610 < $_size606; ++$_i610) { - $elem604 = null; 
- $elem604 = new \metastore\FieldSchema(); - $xfer += $elem604->read($input); - $this->success []= $elem604; + $elem611 = null; + $elem611 = new \metastore\FieldSchema(); + $xfer += $elem611->read($input); + $this->success []= $elem611; } $xfer += $input->readListEnd(); } else { @@ -12696,9 +12920,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter605) + foreach ($this->success as $iter612) { - $xfer += $iter605->write($output); + $xfer += $iter612->write($output); } } $output->writeListEnd(); @@ -12912,15 +13136,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size606 = 0; - $_etype609 = 0; - $xfer += $input->readListBegin($_etype609, $_size606); - for ($_i610 = 0; $_i610 < $_size606; ++$_i610) + $_size613 = 0; + $_etype616 = 0; + $xfer += $input->readListBegin($_etype616, $_size613); + for ($_i617 = 0; $_i617 < $_size613; ++$_i617) { - $elem611 = null; - $elem611 = new \metastore\FieldSchema(); - $xfer += $elem611->read($input); - $this->success []= $elem611; + $elem618 = null; + $elem618 = new \metastore\FieldSchema(); + $xfer += $elem618->read($input); + $this->success []= $elem618; } $xfer += $input->readListEnd(); } else { @@ -12972,9 +13196,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter612) + foreach ($this->success as $iter619) { - $xfer += $iter612->write($output); + $xfer += $iter619->write($output); } } $output->writeListEnd(); @@ -13216,15 +13440,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size613 = 0; - $_etype616 = 0; - $xfer += $input->readListBegin($_etype616, $_size613); - for ($_i617 = 0; $_i617 < $_size613; ++$_i617) + $_size620 = 0; + $_etype623 = 0; + $xfer += $input->readListBegin($_etype623, $_size620); + for ($_i624 = 0; $_i624 < $_size620; ++$_i624) { - 
$elem618 = null; - $elem618 = new \metastore\FieldSchema(); - $xfer += $elem618->read($input); - $this->success []= $elem618; + $elem625 = null; + $elem625 = new \metastore\FieldSchema(); + $xfer += $elem625->read($input); + $this->success []= $elem625; } $xfer += $input->readListEnd(); } else { @@ -13276,9 +13500,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter619) + foreach ($this->success as $iter626) { - $xfer += $iter619->write($output); + $xfer += $iter626->write($output); } } $output->writeListEnd(); @@ -13886,15 +14110,15 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size620 = 0; - $_etype623 = 0; - $xfer += $input->readListBegin($_etype623, $_size620); - for ($_i624 = 0; $_i624 < $_size620; ++$_i624) + $_size627 = 0; + $_etype630 = 0; + $xfer += $input->readListBegin($_etype630, $_size627); + for ($_i631 = 0; $_i631 < $_size627; ++$_i631) { - $elem625 = null; - $elem625 = new \metastore\SQLPrimaryKey(); - $xfer += $elem625->read($input); - $this->primaryKeys []= $elem625; + $elem632 = null; + $elem632 = new \metastore\SQLPrimaryKey(); + $xfer += $elem632->read($input); + $this->primaryKeys []= $elem632; } $xfer += $input->readListEnd(); } else { @@ -13904,15 +14128,15 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size626 = 0; - $_etype629 = 0; - $xfer += $input->readListBegin($_etype629, $_size626); - for ($_i630 = 0; $_i630 < $_size626; ++$_i630) + $_size633 = 0; + $_etype636 = 0; + $xfer += $input->readListBegin($_etype636, $_size633); + for ($_i637 = 0; $_i637 < $_size633; ++$_i637) { - $elem631 = null; - $elem631 = new \metastore\SQLForeignKey(); - $xfer += $elem631->read($input); - $this->foreignKeys []= $elem631; + $elem638 = null; + $elem638 = new \metastore\SQLForeignKey(); + $xfer += $elem638->read($input); + $this->foreignKeys []= $elem638; } 
$xfer += $input->readListEnd(); } else { @@ -13948,9 +14172,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); { - foreach ($this->primaryKeys as $iter632) + foreach ($this->primaryKeys as $iter639) { - $xfer += $iter632->write($output); + $xfer += $iter639->write($output); } } $output->writeListEnd(); @@ -13965,9 +14189,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); { - foreach ($this->foreignKeys as $iter633) + foreach ($this->foreignKeys as $iter640) { - $xfer += $iter633->write($output); + $xfer += $iter640->write($output); } } $output->writeListEnd(); @@ -15313,14 +15537,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size634 = 0; - $_etype637 = 0; - $xfer += $input->readListBegin($_etype637, $_size634); - for ($_i638 = 0; $_i638 < $_size634; ++$_i638) + $_size641 = 0; + $_etype644 = 0; + $xfer += $input->readListBegin($_etype644, $_size641); + for ($_i645 = 0; $_i645 < $_size641; ++$_i645) { - $elem639 = null; - $xfer += $input->readString($elem639); - $this->success []= $elem639; + $elem646 = null; + $xfer += $input->readString($elem646); + $this->success []= $elem646; } $xfer += $input->readListEnd(); } else { @@ -15356,9 +15580,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter640) + foreach ($this->success as $iter647) { - $xfer += $output->writeString($iter640); + $xfer += $output->writeString($iter647); } } $output->writeListEnd(); @@ -15560,14 +15784,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size641 = 0; - $_etype644 = 0; - $xfer += $input->readListBegin($_etype644, $_size641); - for ($_i645 = 0; $_i645 < $_size641; ++$_i645) + $_size648 = 0; + $_etype651 = 0; + $xfer += $input->readListBegin($_etype651, $_size648); + for ($_i652 = 0; $_i652 
< $_size648; ++$_i652) { - $elem646 = null; - $xfer += $input->readString($elem646); - $this->success []= $elem646; + $elem653 = null; + $xfer += $input->readString($elem653); + $this->success []= $elem653; } $xfer += $input->readListEnd(); } else { @@ -15603,9 +15827,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter647) + foreach ($this->success as $iter654) { - $xfer += $output->writeString($iter647); + $xfer += $output->writeString($iter654); } } $output->writeListEnd(); @@ -15710,14 +15934,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size648 = 0; - $_etype651 = 0; - $xfer += $input->readListBegin($_etype651, $_size648); - for ($_i652 = 0; $_i652 < $_size648; ++$_i652) + $_size655 = 0; + $_etype658 = 0; + $xfer += $input->readListBegin($_etype658, $_size655); + for ($_i659 = 0; $_i659 < $_size655; ++$_i659) { - $elem653 = null; - $xfer += $input->readString($elem653); - $this->tbl_types []= $elem653; + $elem660 = null; + $xfer += $input->readString($elem660); + $this->tbl_types []= $elem660; } $xfer += $input->readListEnd(); } else { @@ -15755,9 +15979,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter654) + foreach ($this->tbl_types as $iter661) { - $xfer += $output->writeString($iter654); + $xfer += $output->writeString($iter661); } } $output->writeListEnd(); @@ -15834,15 +16058,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size655 = 0; - $_etype658 = 0; - $xfer += $input->readListBegin($_etype658, $_size655); - for ($_i659 = 0; $_i659 < $_size655; ++$_i659) + $_size662 = 0; + $_etype665 = 0; + $xfer += $input->readListBegin($_etype665, $_size662); + for ($_i666 = 0; $_i666 < $_size662; ++$_i666) { - $elem660 = null; - $elem660 = new \metastore\TableMeta(); - $xfer += 
$elem660->read($input); - $this->success []= $elem660; + $elem667 = null; + $elem667 = new \metastore\TableMeta(); + $xfer += $elem667->read($input); + $this->success []= $elem667; } $xfer += $input->readListEnd(); } else { @@ -15878,9 +16102,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter661) + foreach ($this->success as $iter668) { - $xfer += $iter661->write($output); + $xfer += $iter668->write($output); } } $output->writeListEnd(); @@ -16036,14 +16260,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size662 = 0; - $_etype665 = 0; - $xfer += $input->readListBegin($_etype665, $_size662); - for ($_i666 = 0; $_i666 < $_size662; ++$_i666) + $_size669 = 0; + $_etype672 = 0; + $xfer += $input->readListBegin($_etype672, $_size669); + for ($_i673 = 0; $_i673 < $_size669; ++$_i673) { - $elem667 = null; - $xfer += $input->readString($elem667); - $this->success []= $elem667; + $elem674 = null; + $xfer += $input->readString($elem674); + $this->success []= $elem674; } $xfer += $input->readListEnd(); } else { @@ -16079,9 +16303,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter668) + foreach ($this->success as $iter675) { - $xfer += $output->writeString($iter668); + $xfer += $output->writeString($iter675); } } $output->writeListEnd(); @@ -16396,14 +16620,14 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size669 = 0; - $_etype672 = 0; - $xfer += $input->readListBegin($_etype672, $_size669); - for ($_i673 = 0; $_i673 < $_size669; ++$_i673) + $_size676 = 0; + $_etype679 = 0; + $xfer += $input->readListBegin($_etype679, $_size676); + for ($_i680 = 0; $_i680 < $_size676; ++$_i680) { - $elem674 = null; - $xfer += $input->readString($elem674); - $this->tbl_names []= $elem674; + $elem681 = null; + $xfer 
+= $input->readString($elem681); + $this->tbl_names []= $elem681; } $xfer += $input->readListEnd(); } else { @@ -16436,9 +16660,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter675) + foreach ($this->tbl_names as $iter682) { - $xfer += $output->writeString($iter675); + $xfer += $output->writeString($iter682); } } $output->writeListEnd(); @@ -16539,15 +16763,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size676 = 0; - $_etype679 = 0; - $xfer += $input->readListBegin($_etype679, $_size676); - for ($_i680 = 0; $_i680 < $_size676; ++$_i680) + $_size683 = 0; + $_etype686 = 0; + $xfer += $input->readListBegin($_etype686, $_size683); + for ($_i687 = 0; $_i687 < $_size683; ++$_i687) { - $elem681 = null; - $elem681 = new \metastore\Table(); - $xfer += $elem681->read($input); - $this->success []= $elem681; + $elem688 = null; + $elem688 = new \metastore\Table(); + $xfer += $elem688->read($input); + $this->success []= $elem688; } $xfer += $input->readListEnd(); } else { @@ -16599,9 +16823,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter682) + foreach ($this->success as $iter689) { - $xfer += $iter682->write($output); + $xfer += $iter689->write($output); } } $output->writeListEnd(); @@ -16837,14 +17061,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size683 = 0; - $_etype686 = 0; - $xfer += $input->readListBegin($_etype686, $_size683); - for ($_i687 = 0; $_i687 < $_size683; ++$_i687) + $_size690 = 0; + $_etype693 = 0; + $xfer += $input->readListBegin($_etype693, $_size690); + for ($_i694 = 0; $_i694 < $_size690; ++$_i694) { - $elem688 = null; - $xfer += $input->readString($elem688); - $this->success []= $elem688; + $elem695 = null; + $xfer += $input->readString($elem695); + $this->success 
[]= $elem695; } $xfer += $input->readListEnd(); } else { @@ -16896,9 +17120,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter689) + foreach ($this->success as $iter696) { - $xfer += $output->writeString($iter689); + $xfer += $output->writeString($iter696); } } $output->writeListEnd(); @@ -18211,15 +18435,15 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size690 = 0; - $_etype693 = 0; - $xfer += $input->readListBegin($_etype693, $_size690); - for ($_i694 = 0; $_i694 < $_size690; ++$_i694) + $_size697 = 0; + $_etype700 = 0; + $xfer += $input->readListBegin($_etype700, $_size697); + for ($_i701 = 0; $_i701 < $_size697; ++$_i701) { - $elem695 = null; - $elem695 = new \metastore\Partition(); - $xfer += $elem695->read($input); - $this->new_parts []= $elem695; + $elem702 = null; + $elem702 = new \metastore\Partition(); + $xfer += $elem702->read($input); + $this->new_parts []= $elem702; } $xfer += $input->readListEnd(); } else { @@ -18247,9 +18471,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter696) + foreach ($this->new_parts as $iter703) { - $xfer += $iter696->write($output); + $xfer += $iter703->write($output); } } $output->writeListEnd(); @@ -18464,15 +18688,15 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size697 = 0; - $_etype700 = 0; - $xfer += $input->readListBegin($_etype700, $_size697); - for ($_i701 = 0; $_i701 < $_size697; ++$_i701) + $_size704 = 0; + $_etype707 = 0; + $xfer += $input->readListBegin($_etype707, $_size704); + for ($_i708 = 0; $_i708 < $_size704; ++$_i708) { - $elem702 = null; - $elem702 = new \metastore\PartitionSpec(); - $xfer += $elem702->read($input); - $this->new_parts []= $elem702; + $elem709 = null; + $elem709 = new \metastore\PartitionSpec(); + $xfer 
+= $elem709->read($input); + $this->new_parts []= $elem709; } $xfer += $input->readListEnd(); } else { @@ -18500,9 +18724,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter703) + foreach ($this->new_parts as $iter710) { - $xfer += $iter703->write($output); + $xfer += $iter710->write($output); } } $output->writeListEnd(); @@ -18752,14 +18976,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size704 = 0; - $_etype707 = 0; - $xfer += $input->readListBegin($_etype707, $_size704); - for ($_i708 = 0; $_i708 < $_size704; ++$_i708) + $_size711 = 0; + $_etype714 = 0; + $xfer += $input->readListBegin($_etype714, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $elem709 = null; - $xfer += $input->readString($elem709); - $this->part_vals []= $elem709; + $elem716 = null; + $xfer += $input->readString($elem716); + $this->part_vals []= $elem716; } $xfer += $input->readListEnd(); } else { @@ -18797,9 +19021,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter710) + foreach ($this->part_vals as $iter717) { - $xfer += $output->writeString($iter710); + $xfer += $output->writeString($iter717); } } $output->writeListEnd(); @@ -19301,14 +19525,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size711 = 0; - $_etype714 = 0; - $xfer += $input->readListBegin($_etype714, $_size711); - for ($_i715 = 0; $_i715 < $_size711; ++$_i715) + $_size718 = 0; + $_etype721 = 0; + $xfer += $input->readListBegin($_etype721, $_size718); + for ($_i722 = 0; $_i722 < $_size718; ++$_i722) { - $elem716 = null; - $xfer += $input->readString($elem716); - $this->part_vals []= $elem716; + $elem723 = null; + $xfer += $input->readString($elem723); + $this->part_vals []= $elem723; } $xfer += $input->readListEnd(); } 
else { @@ -19354,9 +19578,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter717) + foreach ($this->part_vals as $iter724) { - $xfer += $output->writeString($iter717); + $xfer += $output->writeString($iter724); } } $output->writeListEnd(); @@ -20210,14 +20434,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size718 = 0; - $_etype721 = 0; - $xfer += $input->readListBegin($_etype721, $_size718); - for ($_i722 = 0; $_i722 < $_size718; ++$_i722) + $_size725 = 0; + $_etype728 = 0; + $xfer += $input->readListBegin($_etype728, $_size725); + for ($_i729 = 0; $_i729 < $_size725; ++$_i729) { - $elem723 = null; - $xfer += $input->readString($elem723); - $this->part_vals []= $elem723; + $elem730 = null; + $xfer += $input->readString($elem730); + $this->part_vals []= $elem730; } $xfer += $input->readListEnd(); } else { @@ -20262,9 +20486,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter724) + foreach ($this->part_vals as $iter731) { - $xfer += $output->writeString($iter724); + $xfer += $output->writeString($iter731); } } $output->writeListEnd(); @@ -20517,14 +20741,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size725 = 0; - $_etype728 = 0; - $xfer += $input->readListBegin($_etype728, $_size725); - for ($_i729 = 0; $_i729 < $_size725; ++$_i729) + $_size732 = 0; + $_etype735 = 0; + $xfer += $input->readListBegin($_etype735, $_size732); + for ($_i736 = 0; $_i736 < $_size732; ++$_i736) { - $elem730 = null; - $xfer += $input->readString($elem730); - $this->part_vals []= $elem730; + $elem737 = null; + $xfer += $input->readString($elem737); + $this->part_vals []= $elem737; } $xfer += $input->readListEnd(); } else { @@ -20577,9 +20801,9 @@ public function write($output) { { 
$output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter731) + foreach ($this->part_vals as $iter738) { - $xfer += $output->writeString($iter731); + $xfer += $output->writeString($iter738); } } $output->writeListEnd(); @@ -21593,14 +21817,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size732 = 0; - $_etype735 = 0; - $xfer += $input->readListBegin($_etype735, $_size732); - for ($_i736 = 0; $_i736 < $_size732; ++$_i736) + $_size739 = 0; + $_etype742 = 0; + $xfer += $input->readListBegin($_etype742, $_size739); + for ($_i743 = 0; $_i743 < $_size739; ++$_i743) { - $elem737 = null; - $xfer += $input->readString($elem737); - $this->part_vals []= $elem737; + $elem744 = null; + $xfer += $input->readString($elem744); + $this->part_vals []= $elem744; } $xfer += $input->readListEnd(); } else { @@ -21638,9 +21862,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter738) + foreach ($this->part_vals as $iter745) { - $xfer += $output->writeString($iter738); + $xfer += $output->writeString($iter745); } } $output->writeListEnd(); @@ -21882,17 +22106,17 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size739 = 0; - $_ktype740 = 0; - $_vtype741 = 0; - $xfer += $input->readMapBegin($_ktype740, $_vtype741, $_size739); - for ($_i743 = 0; $_i743 < $_size739; ++$_i743) + $_size746 = 0; + $_ktype747 = 0; + $_vtype748 = 0; + $xfer += $input->readMapBegin($_ktype747, $_vtype748, $_size746); + for ($_i750 = 0; $_i750 < $_size746; ++$_i750) { - $key744 = ''; - $val745 = ''; - $xfer += $input->readString($key744); - $xfer += $input->readString($val745); - $this->partitionSpecs[$key744] = $val745; + $key751 = ''; + $val752 = ''; + $xfer += $input->readString($key751); + $xfer += $input->readString($val752); + $this->partitionSpecs[$key751] = 
$val752; } $xfer += $input->readMapEnd(); } else { @@ -21948,10 +22172,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter746 => $viter747) + foreach ($this->partitionSpecs as $kiter753 => $viter754) { - $xfer += $output->writeString($kiter746); - $xfer += $output->writeString($viter747); + $xfer += $output->writeString($kiter753); + $xfer += $output->writeString($viter754); } } $output->writeMapEnd(); @@ -22263,17 +22487,17 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size748 = 0; - $_ktype749 = 0; - $_vtype750 = 0; - $xfer += $input->readMapBegin($_ktype749, $_vtype750, $_size748); - for ($_i752 = 0; $_i752 < $_size748; ++$_i752) + $_size755 = 0; + $_ktype756 = 0; + $_vtype757 = 0; + $xfer += $input->readMapBegin($_ktype756, $_vtype757, $_size755); + for ($_i759 = 0; $_i759 < $_size755; ++$_i759) { - $key753 = ''; - $val754 = ''; - $xfer += $input->readString($key753); - $xfer += $input->readString($val754); - $this->partitionSpecs[$key753] = $val754; + $key760 = ''; + $val761 = ''; + $xfer += $input->readString($key760); + $xfer += $input->readString($val761); + $this->partitionSpecs[$key760] = $val761; } $xfer += $input->readMapEnd(); } else { @@ -22329,10 +22553,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter755 => $viter756) + foreach ($this->partitionSpecs as $kiter762 => $viter763) { - $xfer += $output->writeString($kiter755); - $xfer += $output->writeString($viter756); + $xfer += $output->writeString($kiter762); + $xfer += $output->writeString($viter763); } } $output->writeMapEnd(); @@ -22465,15 +22689,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size757 = 0; - $_etype760 = 0; - $xfer += 
$input->readListBegin($_etype760, $_size757); - for ($_i761 = 0; $_i761 < $_size757; ++$_i761) + $_size764 = 0; + $_etype767 = 0; + $xfer += $input->readListBegin($_etype767, $_size764); + for ($_i768 = 0; $_i768 < $_size764; ++$_i768) { - $elem762 = null; - $elem762 = new \metastore\Partition(); - $xfer += $elem762->read($input); - $this->success []= $elem762; + $elem769 = null; + $elem769 = new \metastore\Partition(); + $xfer += $elem769->read($input); + $this->success []= $elem769; } $xfer += $input->readListEnd(); } else { @@ -22533,9 +22757,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter763) + foreach ($this->success as $iter770) { - $xfer += $iter763->write($output); + $xfer += $iter770->write($output); } } $output->writeListEnd(); @@ -22681,14 +22905,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size764 = 0; - $_etype767 = 0; - $xfer += $input->readListBegin($_etype767, $_size764); - for ($_i768 = 0; $_i768 < $_size764; ++$_i768) + $_size771 = 0; + $_etype774 = 0; + $xfer += $input->readListBegin($_etype774, $_size771); + for ($_i775 = 0; $_i775 < $_size771; ++$_i775) { - $elem769 = null; - $xfer += $input->readString($elem769); - $this->part_vals []= $elem769; + $elem776 = null; + $xfer += $input->readString($elem776); + $this->part_vals []= $elem776; } $xfer += $input->readListEnd(); } else { @@ -22705,14 +22929,14 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size770 = 0; - $_etype773 = 0; - $xfer += $input->readListBegin($_etype773, $_size770); - for ($_i774 = 0; $_i774 < $_size770; ++$_i774) + $_size777 = 0; + $_etype780 = 0; + $xfer += $input->readListBegin($_etype780, $_size777); + for ($_i781 = 0; $_i781 < $_size777; ++$_i781) { - $elem775 = null; - $xfer += $input->readString($elem775); - $this->group_names []= $elem775; + $elem782 = null; + $xfer 
+= $input->readString($elem782); + $this->group_names []= $elem782; } $xfer += $input->readListEnd(); } else { @@ -22750,9 +22974,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter776) + foreach ($this->part_vals as $iter783) { - $xfer += $output->writeString($iter776); + $xfer += $output->writeString($iter783); } } $output->writeListEnd(); @@ -22772,9 +22996,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter777) + foreach ($this->group_names as $iter784) { - $xfer += $output->writeString($iter777); + $xfer += $output->writeString($iter784); } } $output->writeListEnd(); @@ -23365,15 +23589,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size778 = 0; - $_etype781 = 0; - $xfer += $input->readListBegin($_etype781, $_size778); - for ($_i782 = 0; $_i782 < $_size778; ++$_i782) + $_size785 = 0; + $_etype788 = 0; + $xfer += $input->readListBegin($_etype788, $_size785); + for ($_i789 = 0; $_i789 < $_size785; ++$_i789) { - $elem783 = null; - $elem783 = new \metastore\Partition(); - $xfer += $elem783->read($input); - $this->success []= $elem783; + $elem790 = null; + $elem790 = new \metastore\Partition(); + $xfer += $elem790->read($input); + $this->success []= $elem790; } $xfer += $input->readListEnd(); } else { @@ -23417,9 +23641,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter784) + foreach ($this->success as $iter791) { - $xfer += $iter784->write($output); + $xfer += $iter791->write($output); } } $output->writeListEnd(); @@ -23565,14 +23789,14 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size785 = 0; - $_etype788 = 0; - $xfer += $input->readListBegin($_etype788, $_size785); - for ($_i789 = 
0; $_i789 < $_size785; ++$_i789) + $_size792 = 0; + $_etype795 = 0; + $xfer += $input->readListBegin($_etype795, $_size792); + for ($_i796 = 0; $_i796 < $_size792; ++$_i796) { - $elem790 = null; - $xfer += $input->readString($elem790); - $this->group_names []= $elem790; + $elem797 = null; + $xfer += $input->readString($elem797); + $this->group_names []= $elem797; } $xfer += $input->readListEnd(); } else { @@ -23620,9 +23844,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter791) + foreach ($this->group_names as $iter798) { - $xfer += $output->writeString($iter791); + $xfer += $output->writeString($iter798); } } $output->writeListEnd(); @@ -23711,15 +23935,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size792 = 0; - $_etype795 = 0; - $xfer += $input->readListBegin($_etype795, $_size792); - for ($_i796 = 0; $_i796 < $_size792; ++$_i796) + $_size799 = 0; + $_etype802 = 0; + $xfer += $input->readListBegin($_etype802, $_size799); + for ($_i803 = 0; $_i803 < $_size799; ++$_i803) { - $elem797 = null; - $elem797 = new \metastore\Partition(); - $xfer += $elem797->read($input); - $this->success []= $elem797; + $elem804 = null; + $elem804 = new \metastore\Partition(); + $xfer += $elem804->read($input); + $this->success []= $elem804; } $xfer += $input->readListEnd(); } else { @@ -23763,9 +23987,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter798) + foreach ($this->success as $iter805) { - $xfer += $iter798->write($output); + $xfer += $iter805->write($output); } } $output->writeListEnd(); @@ -23985,15 +24209,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size799 = 0; - $_etype802 = 0; - $xfer += $input->readListBegin($_etype802, $_size799); - for ($_i803 = 0; $_i803 < $_size799; 
++$_i803) + $_size806 = 0; + $_etype809 = 0; + $xfer += $input->readListBegin($_etype809, $_size806); + for ($_i810 = 0; $_i810 < $_size806; ++$_i810) { - $elem804 = null; - $elem804 = new \metastore\PartitionSpec(); - $xfer += $elem804->read($input); - $this->success []= $elem804; + $elem811 = null; + $elem811 = new \metastore\PartitionSpec(); + $xfer += $elem811->read($input); + $this->success []= $elem811; } $xfer += $input->readListEnd(); } else { @@ -24037,9 +24261,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter805) + foreach ($this->success as $iter812) { - $xfer += $iter805->write($output); + $xfer += $iter812->write($output); } } $output->writeListEnd(); @@ -24246,14 +24470,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size806 = 0; - $_etype809 = 0; - $xfer += $input->readListBegin($_etype809, $_size806); - for ($_i810 = 0; $_i810 < $_size806; ++$_i810) + $_size813 = 0; + $_etype816 = 0; + $xfer += $input->readListBegin($_etype816, $_size813); + for ($_i817 = 0; $_i817 < $_size813; ++$_i817) { - $elem811 = null; - $xfer += $input->readString($elem811); - $this->success []= $elem811; + $elem818 = null; + $xfer += $input->readString($elem818); + $this->success []= $elem818; } $xfer += $input->readListEnd(); } else { @@ -24289,9 +24513,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter812) + foreach ($this->success as $iter819) { - $xfer += $output->writeString($iter812); + $xfer += $output->writeString($iter819); } } $output->writeListEnd(); @@ -24407,14 +24631,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size813 = 0; - $_etype816 = 0; - $xfer += $input->readListBegin($_etype816, $_size813); - for ($_i817 = 0; $_i817 < $_size813; ++$_i817) + $_size820 = 0; + $_etype823 = 
0; + $xfer += $input->readListBegin($_etype823, $_size820); + for ($_i824 = 0; $_i824 < $_size820; ++$_i824) { - $elem818 = null; - $xfer += $input->readString($elem818); - $this->part_vals []= $elem818; + $elem825 = null; + $xfer += $input->readString($elem825); + $this->part_vals []= $elem825; } $xfer += $input->readListEnd(); } else { @@ -24459,9 +24683,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter819) + foreach ($this->part_vals as $iter826) { - $xfer += $output->writeString($iter819); + $xfer += $output->writeString($iter826); } } $output->writeListEnd(); @@ -24555,15 +24779,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size820 = 0; - $_etype823 = 0; - $xfer += $input->readListBegin($_etype823, $_size820); - for ($_i824 = 0; $_i824 < $_size820; ++$_i824) + $_size827 = 0; + $_etype830 = 0; + $xfer += $input->readListBegin($_etype830, $_size827); + for ($_i831 = 0; $_i831 < $_size827; ++$_i831) { - $elem825 = null; - $elem825 = new \metastore\Partition(); - $xfer += $elem825->read($input); - $this->success []= $elem825; + $elem832 = null; + $elem832 = new \metastore\Partition(); + $xfer += $elem832->read($input); + $this->success []= $elem832; } $xfer += $input->readListEnd(); } else { @@ -24607,9 +24831,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter826) + foreach ($this->success as $iter833) { - $xfer += $iter826->write($output); + $xfer += $iter833->write($output); } } $output->writeListEnd(); @@ -24756,14 +24980,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size827 = 0; - $_etype830 = 0; - $xfer += $input->readListBegin($_etype830, $_size827); - for ($_i831 = 0; $_i831 < $_size827; ++$_i831) + $_size834 = 0; + $_etype837 = 0; + $xfer += 
$input->readListBegin($_etype837, $_size834); + for ($_i838 = 0; $_i838 < $_size834; ++$_i838) { - $elem832 = null; - $xfer += $input->readString($elem832); - $this->part_vals []= $elem832; + $elem839 = null; + $xfer += $input->readString($elem839); + $this->part_vals []= $elem839; } $xfer += $input->readListEnd(); } else { @@ -24787,14 +25011,14 @@ public function read($input) case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size833 = 0; - $_etype836 = 0; - $xfer += $input->readListBegin($_etype836, $_size833); - for ($_i837 = 0; $_i837 < $_size833; ++$_i837) + $_size840 = 0; + $_etype843 = 0; + $xfer += $input->readListBegin($_etype843, $_size840); + for ($_i844 = 0; $_i844 < $_size840; ++$_i844) { - $elem838 = null; - $xfer += $input->readString($elem838); - $this->group_names []= $elem838; + $elem845 = null; + $xfer += $input->readString($elem845); + $this->group_names []= $elem845; } $xfer += $input->readListEnd(); } else { @@ -24832,9 +25056,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter839) + foreach ($this->part_vals as $iter846) { - $xfer += $output->writeString($iter839); + $xfer += $output->writeString($iter846); } } $output->writeListEnd(); @@ -24859,9 +25083,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter840) + foreach ($this->group_names as $iter847) { - $xfer += $output->writeString($iter840); + $xfer += $output->writeString($iter847); } } $output->writeListEnd(); @@ -24950,15 +25174,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size841 = 0; - $_etype844 = 0; - $xfer += $input->readListBegin($_etype844, $_size841); - for ($_i845 = 0; $_i845 < $_size841; ++$_i845) + $_size848 = 0; + $_etype851 = 0; + $xfer += $input->readListBegin($_etype851, $_size848); + for ($_i852 = 0; $_i852 < 
$_size848; ++$_i852) { - $elem846 = null; - $elem846 = new \metastore\Partition(); - $xfer += $elem846->read($input); - $this->success []= $elem846; + $elem853 = null; + $elem853 = new \metastore\Partition(); + $xfer += $elem853->read($input); + $this->success []= $elem853; } $xfer += $input->readListEnd(); } else { @@ -25002,9 +25226,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter847) + foreach ($this->success as $iter854) { - $xfer += $iter847->write($output); + $xfer += $iter854->write($output); } } $output->writeListEnd(); @@ -25125,14 +25349,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size848 = 0; - $_etype851 = 0; - $xfer += $input->readListBegin($_etype851, $_size848); - for ($_i852 = 0; $_i852 < $_size848; ++$_i852) + $_size855 = 0; + $_etype858 = 0; + $xfer += $input->readListBegin($_etype858, $_size855); + for ($_i859 = 0; $_i859 < $_size855; ++$_i859) { - $elem853 = null; - $xfer += $input->readString($elem853); - $this->part_vals []= $elem853; + $elem860 = null; + $xfer += $input->readString($elem860); + $this->part_vals []= $elem860; } $xfer += $input->readListEnd(); } else { @@ -25177,9 +25401,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter854) + foreach ($this->part_vals as $iter861) { - $xfer += $output->writeString($iter854); + $xfer += $output->writeString($iter861); } } $output->writeListEnd(); @@ -25272,14 +25496,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size855 = 0; - $_etype858 = 0; - $xfer += $input->readListBegin($_etype858, $_size855); - for ($_i859 = 0; $_i859 < $_size855; ++$_i859) + $_size862 = 0; + $_etype865 = 0; + $xfer += $input->readListBegin($_etype865, $_size862); + for ($_i866 = 0; $_i866 < $_size862; ++$_i866) { - $elem860 = 
null; - $xfer += $input->readString($elem860); - $this->success []= $elem860; + $elem867 = null; + $xfer += $input->readString($elem867); + $this->success []= $elem867; } $xfer += $input->readListEnd(); } else { @@ -25323,9 +25547,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter861) + foreach ($this->success as $iter868) { - $xfer += $output->writeString($iter861); + $xfer += $output->writeString($iter868); } } $output->writeListEnd(); @@ -25568,15 +25792,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size862 = 0; - $_etype865 = 0; - $xfer += $input->readListBegin($_etype865, $_size862); - for ($_i866 = 0; $_i866 < $_size862; ++$_i866) + $_size869 = 0; + $_etype872 = 0; + $xfer += $input->readListBegin($_etype872, $_size869); + for ($_i873 = 0; $_i873 < $_size869; ++$_i873) { - $elem867 = null; - $elem867 = new \metastore\Partition(); - $xfer += $elem867->read($input); - $this->success []= $elem867; + $elem874 = null; + $elem874 = new \metastore\Partition(); + $xfer += $elem874->read($input); + $this->success []= $elem874; } $xfer += $input->readListEnd(); } else { @@ -25620,9 +25844,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter868) + foreach ($this->success as $iter875) { - $xfer += $iter868->write($output); + $xfer += $iter875->write($output); } } $output->writeListEnd(); @@ -25865,15 +26089,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size869 = 0; - $_etype872 = 0; - $xfer += $input->readListBegin($_etype872, $_size869); - for ($_i873 = 0; $_i873 < $_size869; ++$_i873) + $_size876 = 0; + $_etype879 = 0; + $xfer += $input->readListBegin($_etype879, $_size876); + for ($_i880 = 0; $_i880 < $_size876; ++$_i880) { - $elem874 = null; - $elem874 = new \metastore\PartitionSpec(); 
- $xfer += $elem874->read($input); - $this->success []= $elem874; + $elem881 = null; + $elem881 = new \metastore\PartitionSpec(); + $xfer += $elem881->read($input); + $this->success []= $elem881; } $xfer += $input->readListEnd(); } else { @@ -25917,9 +26141,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter875) + foreach ($this->success as $iter882) { - $xfer += $iter875->write($output); + $xfer += $iter882->write($output); } } $output->writeListEnd(); @@ -26485,14 +26709,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size876 = 0; - $_etype879 = 0; - $xfer += $input->readListBegin($_etype879, $_size876); - for ($_i880 = 0; $_i880 < $_size876; ++$_i880) + $_size883 = 0; + $_etype886 = 0; + $xfer += $input->readListBegin($_etype886, $_size883); + for ($_i887 = 0; $_i887 < $_size883; ++$_i887) { - $elem881 = null; - $xfer += $input->readString($elem881); - $this->names []= $elem881; + $elem888 = null; + $xfer += $input->readString($elem888); + $this->names []= $elem888; } $xfer += $input->readListEnd(); } else { @@ -26530,9 +26754,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter882) + foreach ($this->names as $iter889) { - $xfer += $output->writeString($iter882); + $xfer += $output->writeString($iter889); } } $output->writeListEnd(); @@ -26621,15 +26845,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size883 = 0; - $_etype886 = 0; - $xfer += $input->readListBegin($_etype886, $_size883); - for ($_i887 = 0; $_i887 < $_size883; ++$_i887) + $_size890 = 0; + $_etype893 = 0; + $xfer += $input->readListBegin($_etype893, $_size890); + for ($_i894 = 0; $_i894 < $_size890; ++$_i894) { - $elem888 = null; - $elem888 = new \metastore\Partition(); - $xfer += $elem888->read($input); - $this->success []= 
$elem888; + $elem895 = null; + $elem895 = new \metastore\Partition(); + $xfer += $elem895->read($input); + $this->success []= $elem895; } $xfer += $input->readListEnd(); } else { @@ -26673,9 +26897,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter889) + foreach ($this->success as $iter896) { - $xfer += $iter889->write($output); + $xfer += $iter896->write($output); } } $output->writeListEnd(); @@ -27014,15 +27238,15 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size890 = 0; - $_etype893 = 0; - $xfer += $input->readListBegin($_etype893, $_size890); - for ($_i894 = 0; $_i894 < $_size890; ++$_i894) + $_size897 = 0; + $_etype900 = 0; + $xfer += $input->readListBegin($_etype900, $_size897); + for ($_i901 = 0; $_i901 < $_size897; ++$_i901) { - $elem895 = null; - $elem895 = new \metastore\Partition(); - $xfer += $elem895->read($input); - $this->new_parts []= $elem895; + $elem902 = null; + $elem902 = new \metastore\Partition(); + $xfer += $elem902->read($input); + $this->new_parts []= $elem902; } $xfer += $input->readListEnd(); } else { @@ -27060,9 +27284,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter896) + foreach ($this->new_parts as $iter903) { - $xfer += $iter896->write($output); + $xfer += $iter903->write($output); } } $output->writeListEnd(); @@ -27277,15 +27501,15 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size897 = 0; - $_etype900 = 0; - $xfer += $input->readListBegin($_etype900, $_size897); - for ($_i901 = 0; $_i901 < $_size897; ++$_i901) + $_size904 = 0; + $_etype907 = 0; + $xfer += $input->readListBegin($_etype907, $_size904); + for ($_i908 = 0; $_i908 < $_size904; ++$_i908) { - $elem902 = null; - $elem902 = new \metastore\Partition(); - $xfer += $elem902->read($input); 
- $this->new_parts []= $elem902; + $elem909 = null; + $elem909 = new \metastore\Partition(); + $xfer += $elem909->read($input); + $this->new_parts []= $elem909; } $xfer += $input->readListEnd(); } else { @@ -27331,9 +27555,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter903) + foreach ($this->new_parts as $iter910) { - $xfer += $iter903->write($output); + $xfer += $iter910->write($output); } } $output->writeListEnd(); @@ -27811,14 +28035,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size904 = 0; - $_etype907 = 0; - $xfer += $input->readListBegin($_etype907, $_size904); - for ($_i908 = 0; $_i908 < $_size904; ++$_i908) + $_size911 = 0; + $_etype914 = 0; + $xfer += $input->readListBegin($_etype914, $_size911); + for ($_i915 = 0; $_i915 < $_size911; ++$_i915) { - $elem909 = null; - $xfer += $input->readString($elem909); - $this->part_vals []= $elem909; + $elem916 = null; + $xfer += $input->readString($elem916); + $this->part_vals []= $elem916; } $xfer += $input->readListEnd(); } else { @@ -27864,9 +28088,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter910) + foreach ($this->part_vals as $iter917) { - $xfer += $output->writeString($iter910); + $xfer += $output->writeString($iter917); } } $output->writeListEnd(); @@ -28051,14 +28275,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size911 = 0; - $_etype914 = 0; - $xfer += $input->readListBegin($_etype914, $_size911); - for ($_i915 = 0; $_i915 < $_size911; ++$_i915) + $_size918 = 0; + $_etype921 = 0; + $xfer += $input->readListBegin($_etype921, $_size918); + for ($_i922 = 0; $_i922 < $_size918; ++$_i922) { - $elem916 = null; - $xfer += $input->readString($elem916); - $this->part_vals []= $elem916; + $elem923 = null; + $xfer += 
$input->readString($elem923); + $this->part_vals []= $elem923; } $xfer += $input->readListEnd(); } else { @@ -28093,9 +28317,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter917) + foreach ($this->part_vals as $iter924) { - $xfer += $output->writeString($iter917); + $xfer += $output->writeString($iter924); } } $output->writeListEnd(); @@ -28549,14 +28773,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size918 = 0; - $_etype921 = 0; - $xfer += $input->readListBegin($_etype921, $_size918); - for ($_i922 = 0; $_i922 < $_size918; ++$_i922) + $_size925 = 0; + $_etype928 = 0; + $xfer += $input->readListBegin($_etype928, $_size925); + for ($_i929 = 0; $_i929 < $_size925; ++$_i929) { - $elem923 = null; - $xfer += $input->readString($elem923); - $this->success []= $elem923; + $elem930 = null; + $xfer += $input->readString($elem930); + $this->success []= $elem930; } $xfer += $input->readListEnd(); } else { @@ -28592,9 +28816,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter924) + foreach ($this->success as $iter931) { - $xfer += $output->writeString($iter924); + $xfer += $output->writeString($iter931); } } $output->writeListEnd(); @@ -28754,17 +28978,17 @@ public function read($input) case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size925 = 0; - $_ktype926 = 0; - $_vtype927 = 0; - $xfer += $input->readMapBegin($_ktype926, $_vtype927, $_size925); - for ($_i929 = 0; $_i929 < $_size925; ++$_i929) + $_size932 = 0; + $_ktype933 = 0; + $_vtype934 = 0; + $xfer += $input->readMapBegin($_ktype933, $_vtype934, $_size932); + for ($_i936 = 0; $_i936 < $_size932; ++$_i936) { - $key930 = ''; - $val931 = ''; - $xfer += $input->readString($key930); - $xfer += $input->readString($val931); - $this->success[$key930] = $val931; + $key937 = ''; 
+ $val938 = ''; + $xfer += $input->readString($key937); + $xfer += $input->readString($val938); + $this->success[$key937] = $val938; } $xfer += $input->readMapEnd(); } else { @@ -28800,10 +29024,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter932 => $viter933) + foreach ($this->success as $kiter939 => $viter940) { - $xfer += $output->writeString($kiter932); - $xfer += $output->writeString($viter933); + $xfer += $output->writeString($kiter939); + $xfer += $output->writeString($viter940); } } $output->writeMapEnd(); @@ -28923,17 +29147,17 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size934 = 0; - $_ktype935 = 0; - $_vtype936 = 0; - $xfer += $input->readMapBegin($_ktype935, $_vtype936, $_size934); - for ($_i938 = 0; $_i938 < $_size934; ++$_i938) + $_size941 = 0; + $_ktype942 = 0; + $_vtype943 = 0; + $xfer += $input->readMapBegin($_ktype942, $_vtype943, $_size941); + for ($_i945 = 0; $_i945 < $_size941; ++$_i945) { - $key939 = ''; - $val940 = ''; - $xfer += $input->readString($key939); - $xfer += $input->readString($val940); - $this->part_vals[$key939] = $val940; + $key946 = ''; + $val947 = ''; + $xfer += $input->readString($key946); + $xfer += $input->readString($val947); + $this->part_vals[$key946] = $val947; } $xfer += $input->readMapEnd(); } else { @@ -28978,10 +29202,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter941 => $viter942) + foreach ($this->part_vals as $kiter948 => $viter949) { - $xfer += $output->writeString($kiter941); - $xfer += $output->writeString($viter942); + $xfer += $output->writeString($kiter948); + $xfer += $output->writeString($viter949); } } $output->writeMapEnd(); @@ -29303,17 +29527,17 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = 
array(); - $_size943 = 0; - $_ktype944 = 0; - $_vtype945 = 0; - $xfer += $input->readMapBegin($_ktype944, $_vtype945, $_size943); - for ($_i947 = 0; $_i947 < $_size943; ++$_i947) + $_size950 = 0; + $_ktype951 = 0; + $_vtype952 = 0; + $xfer += $input->readMapBegin($_ktype951, $_vtype952, $_size950); + for ($_i954 = 0; $_i954 < $_size950; ++$_i954) { - $key948 = ''; - $val949 = ''; - $xfer += $input->readString($key948); - $xfer += $input->readString($val949); - $this->part_vals[$key948] = $val949; + $key955 = ''; + $val956 = ''; + $xfer += $input->readString($key955); + $xfer += $input->readString($val956); + $this->part_vals[$key955] = $val956; } $xfer += $input->readMapEnd(); } else { @@ -29358,10 +29582,10 @@ public function write($output) { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter950 => $viter951) + foreach ($this->part_vals as $kiter957 => $viter958) { - $xfer += $output->writeString($kiter950); - $xfer += $output->writeString($viter951); + $xfer += $output->writeString($kiter957); + $xfer += $output->writeString($viter958); } } $output->writeMapEnd(); @@ -30835,15 +31059,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size952 = 0; - $_etype955 = 0; - $xfer += $input->readListBegin($_etype955, $_size952); - for ($_i956 = 0; $_i956 < $_size952; ++$_i956) + $_size959 = 0; + $_etype962 = 0; + $xfer += $input->readListBegin($_etype962, $_size959); + for ($_i963 = 0; $_i963 < $_size959; ++$_i963) { - $elem957 = null; - $elem957 = new \metastore\Index(); - $xfer += $elem957->read($input); - $this->success []= $elem957; + $elem964 = null; + $elem964 = new \metastore\Index(); + $xfer += $elem964->read($input); + $this->success []= $elem964; } $xfer += $input->readListEnd(); } else { @@ -30887,9 +31111,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as 
$iter958) + foreach ($this->success as $iter965) { - $xfer += $iter958->write($output); + $xfer += $iter965->write($output); } } $output->writeListEnd(); @@ -31096,14 +31320,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size959 = 0; - $_etype962 = 0; - $xfer += $input->readListBegin($_etype962, $_size959); - for ($_i963 = 0; $_i963 < $_size959; ++$_i963) + $_size966 = 0; + $_etype969 = 0; + $xfer += $input->readListBegin($_etype969, $_size966); + for ($_i970 = 0; $_i970 < $_size966; ++$_i970) { - $elem964 = null; - $xfer += $input->readString($elem964); - $this->success []= $elem964; + $elem971 = null; + $xfer += $input->readString($elem971); + $this->success []= $elem971; } $xfer += $input->readListEnd(); } else { @@ -31139,9 +31363,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter965) + foreach ($this->success as $iter972) { - $xfer += $output->writeString($iter965); + $xfer += $output->writeString($iter972); } } $output->writeListEnd(); @@ -35035,14 +35259,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size966 = 0; - $_etype969 = 0; - $xfer += $input->readListBegin($_etype969, $_size966); - for ($_i970 = 0; $_i970 < $_size966; ++$_i970) + $_size973 = 0; + $_etype976 = 0; + $xfer += $input->readListBegin($_etype976, $_size973); + for ($_i977 = 0; $_i977 < $_size973; ++$_i977) { - $elem971 = null; - $xfer += $input->readString($elem971); - $this->success []= $elem971; + $elem978 = null; + $xfer += $input->readString($elem978); + $this->success []= $elem978; } $xfer += $input->readListEnd(); } else { @@ -35078,9 +35302,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter972) + foreach ($this->success as $iter979) { - $xfer += $output->writeString($iter972); + $xfer += 
$output->writeString($iter979); } } $output->writeListEnd(); @@ -35949,14 +36173,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size973 = 0; - $_etype976 = 0; - $xfer += $input->readListBegin($_etype976, $_size973); - for ($_i977 = 0; $_i977 < $_size973; ++$_i977) + $_size980 = 0; + $_etype983 = 0; + $xfer += $input->readListBegin($_etype983, $_size980); + for ($_i984 = 0; $_i984 < $_size980; ++$_i984) { - $elem978 = null; - $xfer += $input->readString($elem978); - $this->success []= $elem978; + $elem985 = null; + $xfer += $input->readString($elem985); + $this->success []= $elem985; } $xfer += $input->readListEnd(); } else { @@ -35992,9 +36216,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter979) + foreach ($this->success as $iter986) { - $xfer += $output->writeString($iter979); + $xfer += $output->writeString($iter986); } } $output->writeListEnd(); @@ -36685,15 +36909,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size980 = 0; - $_etype983 = 0; - $xfer += $input->readListBegin($_etype983, $_size980); - for ($_i984 = 0; $_i984 < $_size980; ++$_i984) + $_size987 = 0; + $_etype990 = 0; + $xfer += $input->readListBegin($_etype990, $_size987); + for ($_i991 = 0; $_i991 < $_size987; ++$_i991) { - $elem985 = null; - $elem985 = new \metastore\Role(); - $xfer += $elem985->read($input); - $this->success []= $elem985; + $elem992 = null; + $elem992 = new \metastore\Role(); + $xfer += $elem992->read($input); + $this->success []= $elem992; } $xfer += $input->readListEnd(); } else { @@ -36729,9 +36953,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter986) + foreach ($this->success as $iter993) { - $xfer += $iter986->write($output); + $xfer += $iter993->write($output); } } $output->writeListEnd(); 
@@ -37393,14 +37617,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size987 = 0; - $_etype990 = 0; - $xfer += $input->readListBegin($_etype990, $_size987); - for ($_i991 = 0; $_i991 < $_size987; ++$_i991) + $_size994 = 0; + $_etype997 = 0; + $xfer += $input->readListBegin($_etype997, $_size994); + for ($_i998 = 0; $_i998 < $_size994; ++$_i998) { - $elem992 = null; - $xfer += $input->readString($elem992); - $this->group_names []= $elem992; + $elem999 = null; + $xfer += $input->readString($elem999); + $this->group_names []= $elem999; } $xfer += $input->readListEnd(); } else { @@ -37441,9 +37665,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter993) + foreach ($this->group_names as $iter1000) { - $xfer += $output->writeString($iter993); + $xfer += $output->writeString($iter1000); } } $output->writeListEnd(); @@ -37751,15 +37975,15 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size994 = 0; - $_etype997 = 0; - $xfer += $input->readListBegin($_etype997, $_size994); - for ($_i998 = 0; $_i998 < $_size994; ++$_i998) + $_size1001 = 0; + $_etype1004 = 0; + $xfer += $input->readListBegin($_etype1004, $_size1001); + for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005) { - $elem999 = null; - $elem999 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem999->read($input); - $this->success []= $elem999; + $elem1006 = null; + $elem1006 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1006->read($input); + $this->success []= $elem1006; } $xfer += $input->readListEnd(); } else { @@ -37795,9 +38019,9 @@ public function write($output) { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter1000) + foreach ($this->success as $iter1007) { - $xfer += $iter1000->write($output); + $xfer += $iter1007->write($output); } } 
$output->writeListEnd(); @@ -38429,14 +38653,14 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1001 = 0; - $_etype1004 = 0; - $xfer += $input->readListBegin($_etype1004, $_size1001); - for ($_i1005 = 0; $_i1005 < $_size1001; ++$_i1005) + $_size1008 = 0; + $_etype1011 = 0; + $xfer += $input->readListBegin($_etype1011, $_size1008); + for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012) { - $elem1006 = null; - $xfer += $input->readString($elem1006); - $this->group_names []= $elem1006; + $elem1013 = null; + $xfer += $input->readString($elem1013); + $this->group_names []= $elem1013; } $xfer += $input->readListEnd(); } else { @@ -38469,9 +38693,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter1007) + foreach ($this->group_names as $iter1014) { - $xfer += $output->writeString($iter1007); + $xfer += $output->writeString($iter1014); } } $output->writeListEnd(); @@ -38547,14 +38771,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1008 = 0; - $_etype1011 = 0; - $xfer += $input->readListBegin($_etype1011, $_size1008); - for ($_i1012 = 0; $_i1012 < $_size1008; ++$_i1012) + $_size1015 = 0; + $_etype1018 = 0; + $xfer += $input->readListBegin($_etype1018, $_size1015); + for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) { - $elem1013 = null; - $xfer += $input->readString($elem1013); - $this->success []= $elem1013; + $elem1020 = null; + $xfer += $input->readString($elem1020); + $this->success []= $elem1020; } $xfer += $input->readListEnd(); } else { @@ -38590,9 +38814,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1014) + foreach ($this->success as $iter1021) { - $xfer += $output->writeString($iter1014); + $xfer += $output->writeString($iter1021); } } $output->writeListEnd(); @@ 
-39709,14 +39933,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1015 = 0; - $_etype1018 = 0; - $xfer += $input->readListBegin($_etype1018, $_size1015); - for ($_i1019 = 0; $_i1019 < $_size1015; ++$_i1019) + $_size1022 = 0; + $_etype1025 = 0; + $xfer += $input->readListBegin($_etype1025, $_size1022); + for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) { - $elem1020 = null; - $xfer += $input->readString($elem1020); - $this->success []= $elem1020; + $elem1027 = null; + $xfer += $input->readString($elem1027); + $this->success []= $elem1027; } $xfer += $input->readListEnd(); } else { @@ -39744,9 +39968,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1021) + foreach ($this->success as $iter1028) { - $xfer += $output->writeString($iter1021); + $xfer += $output->writeString($iter1028); } } $output->writeListEnd(); @@ -40385,14 +40609,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1022 = 0; - $_etype1025 = 0; - $xfer += $input->readListBegin($_etype1025, $_size1022); - for ($_i1026 = 0; $_i1026 < $_size1022; ++$_i1026) + $_size1029 = 0; + $_etype1032 = 0; + $xfer += $input->readListBegin($_etype1032, $_size1029); + for ($_i1033 = 0; $_i1033 < $_size1029; ++$_i1033) { - $elem1027 = null; - $xfer += $input->readString($elem1027); - $this->success []= $elem1027; + $elem1034 = null; + $xfer += $input->readString($elem1034); + $this->success []= $elem1034; } $xfer += $input->readListEnd(); } else { @@ -40420,9 +40644,9 @@ public function write($output) { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter1028) + foreach ($this->success as $iter1035) { - $xfer += $output->writeString($iter1028); + $xfer += $output->writeString($iter1035); } } $output->writeListEnd(); @@ -44328,4 +44552,644 @@ public function write($output) { } 
+class ThriftHiveMetastore_get_next_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\GetNextWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetNextWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\GetNextWriteIdRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_next_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\GetNextWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 
'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetNextWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_next_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetNextWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_next_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_finalize_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\FinalizeWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\FinalizeWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 
'ThriftHiveMetastore_finalize_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\FinalizeWriteIdRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_finalize_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_finalize_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\FinalizeWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\FinalizeWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_finalize_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, 
$fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\FinalizeWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_finalize_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_heartbeat_write_id_args { + static $_TSPEC; + + /** + * @var \metastore\HeartbeatWriteIdRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\HeartbeatWriteIdRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_heartbeat_write_id_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\HeartbeatWriteIdRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + 
} + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_heartbeat_write_id_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_heartbeat_write_id_result { + static $_TSPEC; + + /** + * @var \metastore\HeartbeatWriteIdResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\HeartbeatWriteIdResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_heartbeat_write_id_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\HeartbeatWriteIdResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('ThriftHiveMetastore_heartbeat_write_id_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_valid_write_ids_args { + static $_TSPEC; + + /** + * @var \metastore\GetValidWriteIdsRequest + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetValidWriteIdsRequest', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_valid_write_ids_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \metastore\GetValidWriteIdsRequest(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_valid_write_ids_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += 
$output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_valid_write_ids_result { + static $_TSPEC; + + /** + * @var \metastore\GetValidWriteIdsResult + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\GetValidWriteIdsResult', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_valid_write_ids_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\GetValidWriteIdsResult(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_valid_write_ids_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } 
+ +} + diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 61171c20e11b..e4eea39567c1 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -4556,6 +4556,14 @@ class Table { * @var bool */ public $temporary = false; + /** + * @var int + */ + public $mmNextWriteId = null; + /** + * @var int + */ + public $mmWatermarkWriteId = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4631,6 +4639,14 @@ public function __construct($vals=null) { 'var' => 'temporary', 'type' => TType::BOOL, ), + 15 => array( + 'var' => 'mmNextWriteId', + 'type' => TType::I64, + ), + 16 => array( + 'var' => 'mmWatermarkWriteId', + 'type' => TType::I64, + ), ); } if (is_array($vals)) { @@ -4676,6 +4692,12 @@ public function __construct($vals=null) { if (isset($vals['temporary'])) { $this->temporary = $vals['temporary']; } + if (isset($vals['mmNextWriteId'])) { + $this->mmNextWriteId = $vals['mmNextWriteId']; + } + if (isset($vals['mmWatermarkWriteId'])) { + $this->mmWatermarkWriteId = $vals['mmWatermarkWriteId']; + } } } @@ -4822,6 +4844,20 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->mmNextWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 16: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->mmWatermarkWriteId); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -4936,6 +4972,16 @@ public function write($output) { $xfer += $output->writeBool($this->temporary); $xfer += $output->writeFieldEnd(); } + if ($this->mmNextWriteId !== null) { + $xfer += $output->writeFieldBegin('mmNextWriteId', TType::I64, 15); + $xfer += $output->writeI64($this->mmNextWriteId); + $xfer += $output->writeFieldEnd(); + } + if ($this->mmWatermarkWriteId 
!== null) { + $xfer += $output->writeFieldBegin('mmWatermarkWriteId', TType::I64, 16); + $xfer += $output->writeI64($this->mmWatermarkWriteId); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17463,37 +17509,43 @@ public function write($output) { } -class GetAllFunctionsResponse { +class GetNextWriteIdRequest { static $_TSPEC; /** - * @var \metastore\Function[] + * @var string */ - public $functions = null; + public $dbName = null; + /** + * @var string + */ + public $tblName = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'functions', - 'type' => TType::LST, - 'etype' => TType::STRUCT, - 'elem' => array( - 'type' => TType::STRUCT, - 'class' => '\metastore\Function', - ), + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tblName', + 'type' => TType::STRING, ), ); } if (is_array($vals)) { - if (isset($vals['functions'])) { - $this->functions = $vals['functions']; + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; } } } public function getName() { - return 'GetAllFunctionsResponse'; + return 'GetNextWriteIdRequest'; } public function read($input) @@ -17512,19 +17564,15 @@ public function read($input) switch ($fid) { case 1: - if ($ftype == TType::LST) { - $this->functions = array(); - $_size562 = 0; - $_etype565 = 0; - $xfer += $input->readListBegin($_etype565, $_size562); - for ($_i566 = 0; $_i566 < $_size562; ++$_i566) - { - $elem567 = null; - $elem567 = new \metastore\Function(); - $xfer += $elem567->read($input); - $this->functions []= $elem567; - } - $xfer += $input->readListEnd(); + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->tblName); } else { $xfer += $input->skip($ftype); } @@ -17541,22 +17589,90 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('GetAllFunctionsResponse'); - if ($this->functions !== null) { - if (!is_array($this->functions)) { - throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + $xfer += $output->writeStructBegin('GetNextWriteIdRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetNextWriteIdResult { + static $_TSPEC; + + /** + * @var int + */ + public $writeId = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; } - $xfer += $output->writeFieldBegin('functions', TType::LST, 1); + } + } + + public function getName() { + return 'GetNextWriteIdResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) { - $output->writeListBegin(TType::STRUCT, count($this->functions)); - { - foreach ($this->functions as $iter568) - { - $xfer += $iter568->write($output); + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += 
$input->skip($ftype); } - } - $output->writeListEnd(); + break; + default: + $xfer += $input->skip($ftype); + break; } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetNextWriteIdResult'); + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 1); + $xfer += $output->writeI64($this->writeId); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -17566,7 +17682,7 @@ public function write($output) { } -class TableMeta { +class FinalizeWriteIdRequest { static $_TSPEC; /** @@ -17576,15 +17692,15 @@ class TableMeta { /** * @var string */ - public $tableName = null; + public $tblName = null; /** - * @var string + * @var int */ - public $tableType = null; + public $writeId = null; /** - * @var string + * @var bool */ - public $comments = null; + public $commit = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -17594,16 +17710,16 @@ public function __construct($vals=null) { 'type' => TType::STRING, ), 2 => array( - 'var' => 'tableName', + 'var' => 'tblName', 'type' => TType::STRING, ), 3 => array( - 'var' => 'tableType', - 'type' => TType::STRING, + 'var' => 'writeId', + 'type' => TType::I64, ), 4 => array( - 'var' => 'comments', - 'type' => TType::STRING, + 'var' => 'commit', + 'type' => TType::BOOL, ), ); } @@ -17611,20 +17727,20 @@ public function __construct($vals=null) { if (isset($vals['dbName'])) { $this->dbName = $vals['dbName']; } - if (isset($vals['tableName'])) { - $this->tableName = $vals['tableName']; + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; } - if (isset($vals['tableType'])) { - $this->tableType = $vals['tableType']; + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; } - if (isset($vals['comments'])) { - $this->comments = $vals['comments']; + if (isset($vals['commit'])) { + $this->commit 
= $vals['commit']; } } } public function getName() { - return 'TableMeta'; + return 'FinalizeWriteIdRequest'; } public function read($input) @@ -17651,21 +17767,21 @@ public function read($input) break; case 2: if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tableName); + $xfer += $input->readString($this->tblName); } else { $xfer += $input->skip($ftype); } break; case 3: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->tableType); + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); } else { $xfer += $input->skip($ftype); } break; case 4: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->comments); + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->commit); } else { $xfer += $input->skip($ftype); } @@ -17682,25 +17798,25 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('TableMeta'); + $xfer += $output->writeStructBegin('FinalizeWriteIdRequest'); if ($this->dbName !== null) { $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); $xfer += $output->writeString($this->dbName); $xfer += $output->writeFieldEnd(); } - if ($this->tableName !== null) { - $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); - $xfer += $output->writeString($this->tableName); + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); $xfer += $output->writeFieldEnd(); } - if ($this->tableType !== null) { - $xfer += $output->writeFieldBegin('tableType', TType::STRING, 3); - $xfer += $output->writeString($this->tableType); + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); + $xfer += $output->writeI64($this->writeId); $xfer += $output->writeFieldEnd(); } - if ($this->comments !== null) { - $xfer += $output->writeFieldBegin('comments', TType::STRING, 4); - $xfer += 
$output->writeString($this->comments); + if ($this->commit !== null) { + $xfer += $output->writeFieldBegin('commit', TType::BOOL, 4); + $xfer += $output->writeBool($this->commit); $xfer += $output->writeFieldEnd(); } $xfer += $output->writeFieldStop(); @@ -17710,32 +17826,19 @@ public function write($output) { } -class MetaException extends TException { +class FinalizeWriteIdResult { static $_TSPEC; - /** - * @var string - */ - public $message = null; - public function __construct($vals=null) { + public function __construct() { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( - 1 => array( - 'var' => 'message', - 'type' => TType::STRING, - ), ); } - if (is_array($vals)) { - if (isset($vals['message'])) { - $this->message = $vals['message']; - } - } } public function getName() { - return 'MetaException'; + return 'FinalizeWriteIdResult'; } public function read($input) @@ -17753,13 +17856,6 @@ public function read($input) } switch ($fid) { - case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->message); - } else { - $xfer += $input->skip($ftype); - } - break; default: $xfer += $input->skip($ftype); break; @@ -17772,12 +17868,7 @@ public function read($input) public function write($output) { $xfer = 0; - $xfer += $output->writeStructBegin('MetaException'); - if ($this->message !== null) { - $xfer += $output->writeFieldBegin('message', TType::STRING, 1); - $xfer += $output->writeString($this->message); - $xfer += $output->writeFieldEnd(); - } + $xfer += $output->writeStructBegin('FinalizeWriteIdResult'); $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -17785,27 +17876,788 @@ public function write($output) { } -class UnknownTableException extends TException { +class HeartbeatWriteIdRequest { static $_TSPEC; /** * @var string */ - public $message = null; + public $dbName = null; + /** + * @var string + */ + public $tblName = null; + /** + * @var int + */ + public $writeId = null; public function 
__construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'message', + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tblName', 'type' => TType::STRING, ), + 3 => array( + 'var' => 'writeId', + 'type' => TType::I64, + ), ); } if (is_array($vals)) { - if (isset($vals['message'])) { - $this->message = $vals['message']; - } + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; + } + if (isset($vals['writeId'])) { + $this->writeId = $vals['writeId']; + } + } + } + + public function getName() { + return 'HeartbeatWriteIdRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->writeId); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('HeartbeatWriteIdRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += 
$output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + if ($this->writeId !== null) { + $xfer += $output->writeFieldBegin('writeId', TType::I64, 3); + $xfer += $output->writeI64($this->writeId); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class HeartbeatWriteIdResult { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'HeartbeatWriteIdResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('HeartbeatWriteIdResult'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetValidWriteIdsRequest { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tblName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tblName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tblName'])) { + $this->tblName = $vals['tblName']; + } + } + } + + public function getName() { + return 'GetValidWriteIdsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += 
$input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tblName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetValidWriteIdsRequest'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tblName !== null) { + $xfer += $output->writeFieldBegin('tblName', TType::STRING, 2); + $xfer += $output->writeString($this->tblName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetValidWriteIdsResult { + static $_TSPEC; + + /** + * @var int + */ + public $lowWatermarkId = null; + /** + * @var int + */ + public $highWatermarkId = null; + /** + * @var bool + */ + public $areIdsValid = null; + /** + * @var int[] + */ + public $ids = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'lowWatermarkId', + 'type' => TType::I64, + ), + 2 => array( + 'var' => 'highWatermarkId', + 'type' => TType::I64, + ), + 3 => array( + 'var' => 'areIdsValid', + 'type' => TType::BOOL, + ), + 4 => array( + 'var' => 'ids', + 'type' => TType::LST, + 'etype' => TType::I64, + 'elem' => array( + 'type' => TType::I64, + ), + ), + ); + } + if (is_array($vals)) { + if 
(isset($vals['lowWatermarkId'])) { + $this->lowWatermarkId = $vals['lowWatermarkId']; + } + if (isset($vals['highWatermarkId'])) { + $this->highWatermarkId = $vals['highWatermarkId']; + } + if (isset($vals['areIdsValid'])) { + $this->areIdsValid = $vals['areIdsValid']; + } + if (isset($vals['ids'])) { + $this->ids = $vals['ids']; + } + } + } + + public function getName() { + return 'GetValidWriteIdsResult'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->lowWatermarkId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->highWatermarkId); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->areIdsValid); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::LST) { + $this->ids = array(); + $_size562 = 0; + $_etype565 = 0; + $xfer += $input->readListBegin($_etype565, $_size562); + for ($_i566 = 0; $_i566 < $_size562; ++$_i566) + { + $elem567 = null; + $xfer += $input->readI64($elem567); + $this->ids []= $elem567; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetValidWriteIdsResult'); + if ($this->lowWatermarkId !== null) { + $xfer += $output->writeFieldBegin('lowWatermarkId', TType::I64, 1); + $xfer += $output->writeI64($this->lowWatermarkId); + $xfer += $output->writeFieldEnd(); + } + if 
($this->highWatermarkId !== null) { + $xfer += $output->writeFieldBegin('highWatermarkId', TType::I64, 2); + $xfer += $output->writeI64($this->highWatermarkId); + $xfer += $output->writeFieldEnd(); + } + if ($this->areIdsValid !== null) { + $xfer += $output->writeFieldBegin('areIdsValid', TType::BOOL, 3); + $xfer += $output->writeBool($this->areIdsValid); + $xfer += $output->writeFieldEnd(); + } + if ($this->ids !== null) { + if (!is_array($this->ids)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('ids', TType::LST, 4); + { + $output->writeListBegin(TType::I64, count($this->ids)); + { + foreach ($this->ids as $iter568) + { + $xfer += $output->writeI64($iter568); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class GetAllFunctionsResponse { + static $_TSPEC; + + /** + * @var \metastore\Function[] + */ + public $functions = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'functions', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\Function', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['functions'])) { + $this->functions = $vals['functions']; + } + } + } + + public function getName() { + return 'GetAllFunctionsResponse'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->functions = array(); + $_size569 = 0; + $_etype572 = 0; + $xfer += $input->readListBegin($_etype572, $_size569); + for ($_i573 = 
0; $_i573 < $_size569; ++$_i573) + { + $elem574 = null; + $elem574 = new \metastore\Function(); + $xfer += $elem574->read($input); + $this->functions []= $elem574; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('GetAllFunctionsResponse'); + if ($this->functions !== null) { + if (!is_array($this->functions)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('functions', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->functions)); + { + foreach ($this->functions as $iter575) + { + $xfer += $iter575->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TableMeta { + static $_TSPEC; + + /** + * @var string + */ + public $dbName = null; + /** + * @var string + */ + public $tableName = null; + /** + * @var string + */ + public $tableType = null; + /** + * @var string + */ + public $comments = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'dbName', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'tableType', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'comments', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['dbName'])) { + $this->dbName = $vals['dbName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + if (isset($vals['tableType'])) { + $this->tableType = $vals['tableType']; + } + 
if (isset($vals['comments'])) { + $this->comments = $vals['comments']; + } + } + } + + public function getName() { + return 'TableMeta'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->dbName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableType); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->comments); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TableMeta'); + if ($this->dbName !== null) { + $xfer += $output->writeFieldBegin('dbName', TType::STRING, 1); + $xfer += $output->writeString($this->dbName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 2); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableType !== null) { + $xfer += $output->writeFieldBegin('tableType', TType::STRING, 3); + $xfer += $output->writeString($this->tableType); + $xfer += $output->writeFieldEnd(); + } + if ($this->comments !== null) { + $xfer += $output->writeFieldBegin('comments', TType::STRING, 4); + $xfer += $output->writeString($this->comments); + $xfer 
+= $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class MetaException extends TException { + static $_TSPEC; + + /** + * @var string + */ + public $message = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'message', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['message'])) { + $this->message = $vals['message']; + } + } + } + + public function getName() { + return 'MetaException'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->message); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('MetaException'); + if ($this->message !== null) { + $xfer += $output->writeFieldBegin('message', TType::STRING, 1); + $xfer += $output->writeString($this->message); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class UnknownTableException extends TException { + static $_TSPEC; + + /** + * @var string + */ + public $message = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'message', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['message'])) { + $this->message = $vals['message']; + } } } @@ -18935,6 +19787,7 
@@ final class Constant extends \Thrift\Type\TConstant { static protected $TABLE_IS_TRANSACTIONAL; static protected $TABLE_NO_AUTO_COMPACT; static protected $TABLE_TRANSACTIONAL_PROPERTIES; + static protected $TABLE_IS_MM; static protected function init_DDL_TIME() { return "transient_lastDdlTime"; @@ -19031,6 +19884,10 @@ static protected function init_TABLE_NO_AUTO_COMPACT() { static protected function init_TABLE_TRANSACTIONAL_PROPERTIES() { return "transactional_properties"; } + + static protected function init_TABLE_IS_MM() { + return "hivecommit"; + } } diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 8f505f10aef7..555cd2782507 100755 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -174,6 +174,10 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)') print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') print(' CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)') + print(' GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req)') + print(' FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req)') + print(' HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req)') + print(' GetValidWriteIdsResult get_valid_write_ids(GetValidWriteIdsRequest req)') print(' string getName()') print(' string getVersion()') print(' fb_status getStatus()') @@ -1143,6 +1147,30 @@ elif cmd == 'cache_file_metadata': sys.exit(1) pp.pprint(client.cache_file_metadata(eval(args[0]),)) +elif cmd == 'get_next_write_id': + if len(args) != 1: + print('get_next_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.get_next_write_id(eval(args[0]),)) + +elif cmd == 'finalize_write_id': + if 
len(args) != 1: + print('finalize_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.finalize_write_id(eval(args[0]),)) + +elif cmd == 'heartbeat_write_id': + if len(args) != 1: + print('heartbeat_write_id requires 1 args') + sys.exit(1) + pp.pprint(client.heartbeat_write_id(eval(args[0]),)) + +elif cmd == 'get_valid_write_ids': + if len(args) != 1: + print('get_valid_write_ids requires 1 args') + sys.exit(1) + pp.pprint(client.get_valid_write_ids(eval(args[0]),)) + elif cmd == 'getName': if len(args) != 0: print('getName requires 0 args') diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 10778f2936c6..6f157d84e214 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -1209,6 +1209,34 @@ def cache_file_metadata(self, req): """ pass + def get_next_write_id(self, req): + """ + Parameters: + - req + """ + pass + + def finalize_write_id(self, req): + """ + Parameters: + - req + """ + pass + + def heartbeat_write_id(self, req): + """ + Parameters: + - req + """ + pass + + def get_valid_write_ids(self, req): + """ + Parameters: + - req + """ + pass + class Client(fb303.FacebookService.Client, Iface): """ @@ -6642,6 +6670,130 @@ def recv_cache_file_metadata(self): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "cache_file_metadata failed: unknown result") + def get_next_write_id(self, req): + """ + Parameters: + - req + """ + self.send_get_next_write_id(req) + return self.recv_get_next_write_id() + + def send_get_next_write_id(self, req): + self._oprot.writeMessageBegin('get_next_write_id', TMessageType.CALL, self._seqid) + args = get_next_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_next_write_id(self): + iprot = 
self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_next_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_next_write_id failed: unknown result") + + def finalize_write_id(self, req): + """ + Parameters: + - req + """ + self.send_finalize_write_id(req) + return self.recv_finalize_write_id() + + def send_finalize_write_id(self, req): + self._oprot.writeMessageBegin('finalize_write_id', TMessageType.CALL, self._seqid) + args = finalize_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_finalize_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = finalize_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "finalize_write_id failed: unknown result") + + def heartbeat_write_id(self, req): + """ + Parameters: + - req + """ + self.send_heartbeat_write_id(req) + return self.recv_heartbeat_write_id() + + def send_heartbeat_write_id(self, req): + self._oprot.writeMessageBegin('heartbeat_write_id', TMessageType.CALL, self._seqid) + args = heartbeat_write_id_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_heartbeat_write_id(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = 
heartbeat_write_id_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "heartbeat_write_id failed: unknown result") + + def get_valid_write_ids(self, req): + """ + Parameters: + - req + """ + self.send_get_valid_write_ids(req) + return self.recv_get_valid_write_ids() + + def send_get_valid_write_ids(self, req): + self._oprot.writeMessageBegin('get_valid_write_ids', TMessageType.CALL, self._seqid) + args = get_valid_write_ids_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_valid_write_ids(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_valid_write_ids_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_valid_write_ids failed: unknown result") + class Processor(fb303.FacebookService.Processor, Iface, TProcessor): def __init__(self, handler): @@ -6796,6 +6948,10 @@ def __init__(self, handler): self._processMap["put_file_metadata"] = Processor.process_put_file_metadata self._processMap["clear_file_metadata"] = Processor.process_clear_file_metadata self._processMap["cache_file_metadata"] = Processor.process_cache_file_metadata + self._processMap["get_next_write_id"] = Processor.process_get_next_write_id + self._processMap["finalize_write_id"] = Processor.process_finalize_write_id + self._processMap["heartbeat_write_id"] = Processor.process_heartbeat_write_id + self._processMap["get_valid_write_ids"] = Processor.process_get_valid_write_ids def process(self, iprot, oprot): (name, type, seqid) = iprot.readMessageBegin() @@ -10481,6 +10637,82 @@ def 
process_cache_file_metadata(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_next_write_id(self, seqid, iprot, oprot): + args = get_next_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_next_write_id_result() + try: + result.success = self._handler.get_next_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_next_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_finalize_write_id(self, seqid, iprot, oprot): + args = finalize_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = finalize_write_id_result() + try: + result.success = self._handler.finalize_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("finalize_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_heartbeat_write_id(self, seqid, iprot, oprot): + args = heartbeat_write_id_args() + args.read(iprot) + iprot.readMessageEnd() + result = heartbeat_write_id_result() + try: + result.success = self._handler.heartbeat_write_id(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + 
oprot.writeMessageBegin("heartbeat_write_id", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_valid_write_ids(self, seqid, iprot, oprot): + args = get_valid_write_ids_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_valid_write_ids_result() + try: + result.success = self._handler.get_valid_write_ids(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_valid_write_ids", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + # HELPER FUNCTIONS AND STRUCTURES @@ -11367,10 +11599,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype569, _size566) = iprot.readListBegin() - for _i570 in xrange(_size566): - _elem571 = iprot.readString() - self.success.append(_elem571) + (_etype576, _size573) = iprot.readListBegin() + for _i577 in xrange(_size573): + _elem578 = iprot.readString() + self.success.append(_elem578) iprot.readListEnd() else: iprot.skip(ftype) @@ -11393,8 +11625,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter572 in self.success: - oprot.writeString(iter572) + for iter579 in self.success: + oprot.writeString(iter579) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11499,10 +11731,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype576, _size573) = iprot.readListBegin() - for _i577 in xrange(_size573): - _elem578 = iprot.readString() - self.success.append(_elem578) + (_etype583, _size580) = iprot.readListBegin() + for _i584 in xrange(_size580): + _elem585 = 
iprot.readString() + self.success.append(_elem585) iprot.readListEnd() else: iprot.skip(ftype) @@ -11525,8 +11757,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter579 in self.success: - oprot.writeString(iter579) + for iter586 in self.success: + oprot.writeString(iter586) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12296,12 +12528,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype581, _vtype582, _size580 ) = iprot.readMapBegin() - for _i584 in xrange(_size580): - _key585 = iprot.readString() - _val586 = Type() - _val586.read(iprot) - self.success[_key585] = _val586 + (_ktype588, _vtype589, _size587 ) = iprot.readMapBegin() + for _i591 in xrange(_size587): + _key592 = iprot.readString() + _val593 = Type() + _val593.read(iprot) + self.success[_key592] = _val593 iprot.readMapEnd() else: iprot.skip(ftype) @@ -12324,9 +12556,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter587,viter588 in self.success.items(): - oprot.writeString(kiter587) - viter588.write(oprot) + for kiter594,viter595 in self.success.items(): + oprot.writeString(kiter594) + viter595.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -12469,11 +12701,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype592, _size589) = iprot.readListBegin() - for _i593 in xrange(_size589): - _elem594 = FieldSchema() - _elem594.read(iprot) - self.success.append(_elem594) + (_etype599, _size596) = iprot.readListBegin() + for _i600 in xrange(_size596): + _elem601 = FieldSchema() + _elem601.read(iprot) + self.success.append(_elem601) iprot.readListEnd() else: iprot.skip(ftype) @@ -12508,8 +12740,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter595 in self.success: - iter595.write(oprot) + for iter602 in self.success: + iter602.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12676,11 +12908,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype599, _size596) = iprot.readListBegin() - for _i600 in xrange(_size596): - _elem601 = FieldSchema() - _elem601.read(iprot) - self.success.append(_elem601) + (_etype606, _size603) = iprot.readListBegin() + for _i607 in xrange(_size603): + _elem608 = FieldSchema() + _elem608.read(iprot) + self.success.append(_elem608) iprot.readListEnd() else: iprot.skip(ftype) @@ -12715,8 +12947,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter602 in self.success: - iter602.write(oprot) + for iter609 in self.success: + iter609.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12869,11 +13101,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype606, _size603) = iprot.readListBegin() - for _i607 in xrange(_size603): - _elem608 = FieldSchema() - _elem608.read(iprot) - self.success.append(_elem608) + (_etype613, _size610) = iprot.readListBegin() + for _i614 in xrange(_size610): + _elem615 = FieldSchema() + _elem615.read(iprot) + self.success.append(_elem615) iprot.readListEnd() else: iprot.skip(ftype) @@ -12908,8 +13140,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter609 in self.success: - iter609.write(oprot) + for iter616 in self.success: + iter616.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13076,11 +13308,11 @@ def read(self, 
iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype613, _size610) = iprot.readListBegin() - for _i614 in xrange(_size610): - _elem615 = FieldSchema() - _elem615.read(iprot) - self.success.append(_elem615) + (_etype620, _size617) = iprot.readListBegin() + for _i621 in xrange(_size617): + _elem622 = FieldSchema() + _elem622.read(iprot) + self.success.append(_elem622) iprot.readListEnd() else: iprot.skip(ftype) @@ -13115,8 +13347,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter616 in self.success: - iter616.write(oprot) + for iter623 in self.success: + iter623.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13557,22 +13789,22 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype620, _size617) = iprot.readListBegin() - for _i621 in xrange(_size617): - _elem622 = SQLPrimaryKey() - _elem622.read(iprot) - self.primaryKeys.append(_elem622) + (_etype627, _size624) = iprot.readListBegin() + for _i628 in xrange(_size624): + _elem629 = SQLPrimaryKey() + _elem629.read(iprot) + self.primaryKeys.append(_elem629) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype626, _size623) = iprot.readListBegin() - for _i627 in xrange(_size623): - _elem628 = SQLForeignKey() - _elem628.read(iprot) - self.foreignKeys.append(_elem628) + (_etype633, _size630) = iprot.readListBegin() + for _i634 in xrange(_size630): + _elem635 = SQLForeignKey() + _elem635.read(iprot) + self.foreignKeys.append(_elem635) iprot.readListEnd() else: iprot.skip(ftype) @@ -13593,15 +13825,15 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter629 in self.primaryKeys: - iter629.write(oprot) + for iter636 in 
self.primaryKeys: + iter636.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter630 in self.foreignKeys: - iter630.write(oprot) + for iter637 in self.foreignKeys: + iter637.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14637,10 +14869,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype634, _size631) = iprot.readListBegin() - for _i635 in xrange(_size631): - _elem636 = iprot.readString() - self.success.append(_elem636) + (_etype641, _size638) = iprot.readListBegin() + for _i642 in xrange(_size638): + _elem643 = iprot.readString() + self.success.append(_elem643) iprot.readListEnd() else: iprot.skip(ftype) @@ -14663,8 +14895,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter637 in self.success: - oprot.writeString(iter637) + for iter644 in self.success: + oprot.writeString(iter644) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14814,10 +15046,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype641, _size638) = iprot.readListBegin() - for _i642 in xrange(_size638): - _elem643 = iprot.readString() - self.success.append(_elem643) + (_etype648, _size645) = iprot.readListBegin() + for _i649 in xrange(_size645): + _elem650 = iprot.readString() + self.success.append(_elem650) iprot.readListEnd() else: iprot.skip(ftype) @@ -14840,8 +15072,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter644 in self.success: - oprot.writeString(iter644) + for iter651 in self.success: + oprot.writeString(iter651) oprot.writeListEnd() oprot.writeFieldEnd() if 
self.o1 is not None: @@ -14914,10 +15146,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype648, _size645) = iprot.readListBegin() - for _i649 in xrange(_size645): - _elem650 = iprot.readString() - self.tbl_types.append(_elem650) + (_etype655, _size652) = iprot.readListBegin() + for _i656 in xrange(_size652): + _elem657 = iprot.readString() + self.tbl_types.append(_elem657) iprot.readListEnd() else: iprot.skip(ftype) @@ -14942,8 +15174,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter651 in self.tbl_types: - oprot.writeString(iter651) + for iter658 in self.tbl_types: + oprot.writeString(iter658) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14999,11 +15231,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype655, _size652) = iprot.readListBegin() - for _i656 in xrange(_size652): - _elem657 = TableMeta() - _elem657.read(iprot) - self.success.append(_elem657) + (_etype662, _size659) = iprot.readListBegin() + for _i663 in xrange(_size659): + _elem664 = TableMeta() + _elem664.read(iprot) + self.success.append(_elem664) iprot.readListEnd() else: iprot.skip(ftype) @@ -15026,8 +15258,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter658 in self.success: - iter658.write(oprot) + for iter665 in self.success: + iter665.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15151,10 +15383,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype662, _size659) = iprot.readListBegin() - for _i663 in xrange(_size659): - _elem664 = iprot.readString() - self.success.append(_elem664) + (_etype669, _size666) = iprot.readListBegin() + for _i670 in xrange(_size666): + 
_elem671 = iprot.readString() + self.success.append(_elem671) iprot.readListEnd() else: iprot.skip(ftype) @@ -15177,8 +15409,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter665 in self.success: - oprot.writeString(iter665) + for iter672 in self.success: + oprot.writeString(iter672) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15414,10 +15646,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype669, _size666) = iprot.readListBegin() - for _i670 in xrange(_size666): - _elem671 = iprot.readString() - self.tbl_names.append(_elem671) + (_etype676, _size673) = iprot.readListBegin() + for _i677 in xrange(_size673): + _elem678 = iprot.readString() + self.tbl_names.append(_elem678) iprot.readListEnd() else: iprot.skip(ftype) @@ -15438,8 +15670,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter672 in self.tbl_names: - oprot.writeString(iter672) + for iter679 in self.tbl_names: + oprot.writeString(iter679) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15500,11 +15732,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype676, _size673) = iprot.readListBegin() - for _i677 in xrange(_size673): - _elem678 = Table() - _elem678.read(iprot) - self.success.append(_elem678) + (_etype683, _size680) = iprot.readListBegin() + for _i684 in xrange(_size680): + _elem685 = Table() + _elem685.read(iprot) + self.success.append(_elem685) iprot.readListEnd() else: iprot.skip(ftype) @@ -15539,8 +15771,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter679 in self.success: - iter679.write(oprot) + for 
iter686 in self.success: + iter686.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15706,10 +15938,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype683, _size680) = iprot.readListBegin() - for _i684 in xrange(_size680): - _elem685 = iprot.readString() - self.success.append(_elem685) + (_etype690, _size687) = iprot.readListBegin() + for _i691 in xrange(_size687): + _elem692 = iprot.readString() + self.success.append(_elem692) iprot.readListEnd() else: iprot.skip(ftype) @@ -15744,8 +15976,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter686 in self.success: - oprot.writeString(iter686) + for iter693 in self.success: + oprot.writeString(iter693) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16715,11 +16947,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype690, _size687) = iprot.readListBegin() - for _i691 in xrange(_size687): - _elem692 = Partition() - _elem692.read(iprot) - self.new_parts.append(_elem692) + (_etype697, _size694) = iprot.readListBegin() + for _i698 in xrange(_size694): + _elem699 = Partition() + _elem699.read(iprot) + self.new_parts.append(_elem699) iprot.readListEnd() else: iprot.skip(ftype) @@ -16736,8 +16968,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter693 in self.new_parts: - iter693.write(oprot) + for iter700 in self.new_parts: + iter700.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16895,11 +17127,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype697, _size694) = iprot.readListBegin() - for _i698 in xrange(_size694): - _elem699 = PartitionSpec() - _elem699.read(iprot) - 
self.new_parts.append(_elem699) + (_etype704, _size701) = iprot.readListBegin() + for _i705 in xrange(_size701): + _elem706 = PartitionSpec() + _elem706.read(iprot) + self.new_parts.append(_elem706) iprot.readListEnd() else: iprot.skip(ftype) @@ -16916,8 +17148,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter700 in self.new_parts: - iter700.write(oprot) + for iter707 in self.new_parts: + iter707.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17091,10 +17323,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype704, _size701) = iprot.readListBegin() - for _i705 in xrange(_size701): - _elem706 = iprot.readString() - self.part_vals.append(_elem706) + (_etype711, _size708) = iprot.readListBegin() + for _i712 in xrange(_size708): + _elem713 = iprot.readString() + self.part_vals.append(_elem713) iprot.readListEnd() else: iprot.skip(ftype) @@ -17119,8 +17351,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter707 in self.part_vals: - oprot.writeString(iter707) + for iter714 in self.part_vals: + oprot.writeString(iter714) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17473,10 +17705,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype711, _size708) = iprot.readListBegin() - for _i712 in xrange(_size708): - _elem713 = iprot.readString() - self.part_vals.append(_elem713) + (_etype718, _size715) = iprot.readListBegin() + for _i719 in xrange(_size715): + _elem720 = iprot.readString() + self.part_vals.append(_elem720) iprot.readListEnd() else: iprot.skip(ftype) @@ -17507,8 +17739,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 
3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter714 in self.part_vals: - oprot.writeString(iter714) + for iter721 in self.part_vals: + oprot.writeString(iter721) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -18103,10 +18335,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype718, _size715) = iprot.readListBegin() - for _i719 in xrange(_size715): - _elem720 = iprot.readString() - self.part_vals.append(_elem720) + (_etype725, _size722) = iprot.readListBegin() + for _i726 in xrange(_size722): + _elem727 = iprot.readString() + self.part_vals.append(_elem727) iprot.readListEnd() else: iprot.skip(ftype) @@ -18136,8 +18368,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter721 in self.part_vals: - oprot.writeString(iter721) + for iter728 in self.part_vals: + oprot.writeString(iter728) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -18310,10 +18542,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype725, _size722) = iprot.readListBegin() - for _i726 in xrange(_size722): - _elem727 = iprot.readString() - self.part_vals.append(_elem727) + (_etype732, _size729) = iprot.readListBegin() + for _i733 in xrange(_size729): + _elem734 = iprot.readString() + self.part_vals.append(_elem734) iprot.readListEnd() else: iprot.skip(ftype) @@ -18349,8 +18581,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter728 in self.part_vals: - oprot.writeString(iter728) + for iter735 in self.part_vals: + oprot.writeString(iter735) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -19087,10 +19319,10 @@ def read(self, iprot): elif fid == 3: if ftype 
== TType.LIST: self.part_vals = [] - (_etype732, _size729) = iprot.readListBegin() - for _i733 in xrange(_size729): - _elem734 = iprot.readString() - self.part_vals.append(_elem734) + (_etype739, _size736) = iprot.readListBegin() + for _i740 in xrange(_size736): + _elem741 = iprot.readString() + self.part_vals.append(_elem741) iprot.readListEnd() else: iprot.skip(ftype) @@ -19115,8 +19347,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter735 in self.part_vals: - oprot.writeString(iter735) + for iter742 in self.part_vals: + oprot.writeString(iter742) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19275,11 +19507,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype737, _vtype738, _size736 ) = iprot.readMapBegin() - for _i740 in xrange(_size736): - _key741 = iprot.readString() - _val742 = iprot.readString() - self.partitionSpecs[_key741] = _val742 + (_ktype744, _vtype745, _size743 ) = iprot.readMapBegin() + for _i747 in xrange(_size743): + _key748 = iprot.readString() + _val749 = iprot.readString() + self.partitionSpecs[_key748] = _val749 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19316,9 +19548,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter743,viter744 in self.partitionSpecs.items(): - oprot.writeString(kiter743) - oprot.writeString(viter744) + for kiter750,viter751 in self.partitionSpecs.items(): + oprot.writeString(kiter750) + oprot.writeString(viter751) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -19523,11 +19755,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype746, _vtype747, _size745 ) = iprot.readMapBegin() - for _i749 in 
xrange(_size745): - _key750 = iprot.readString() - _val751 = iprot.readString() - self.partitionSpecs[_key750] = _val751 + (_ktype753, _vtype754, _size752 ) = iprot.readMapBegin() + for _i756 in xrange(_size752): + _key757 = iprot.readString() + _val758 = iprot.readString() + self.partitionSpecs[_key757] = _val758 iprot.readMapEnd() else: iprot.skip(ftype) @@ -19564,9 +19796,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter752,viter753 in self.partitionSpecs.items(): - oprot.writeString(kiter752) - oprot.writeString(viter753) + for kiter759,viter760 in self.partitionSpecs.items(): + oprot.writeString(kiter759) + oprot.writeString(viter760) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -19649,11 +19881,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype757, _size754) = iprot.readListBegin() - for _i758 in xrange(_size754): - _elem759 = Partition() - _elem759.read(iprot) - self.success.append(_elem759) + (_etype764, _size761) = iprot.readListBegin() + for _i765 in xrange(_size761): + _elem766 = Partition() + _elem766.read(iprot) + self.success.append(_elem766) iprot.readListEnd() else: iprot.skip(ftype) @@ -19694,8 +19926,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter760 in self.success: - iter760.write(oprot) + for iter767 in self.success: + iter767.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19789,10 +20021,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype764, _size761) = iprot.readListBegin() - for _i765 in xrange(_size761): - _elem766 = iprot.readString() - self.part_vals.append(_elem766) + (_etype771, _size768) = 
iprot.readListBegin() + for _i772 in xrange(_size768): + _elem773 = iprot.readString() + self.part_vals.append(_elem773) iprot.readListEnd() else: iprot.skip(ftype) @@ -19804,10 +20036,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype770, _size767) = iprot.readListBegin() - for _i771 in xrange(_size767): - _elem772 = iprot.readString() - self.group_names.append(_elem772) + (_etype777, _size774) = iprot.readListBegin() + for _i778 in xrange(_size774): + _elem779 = iprot.readString() + self.group_names.append(_elem779) iprot.readListEnd() else: iprot.skip(ftype) @@ -19832,8 +20064,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter773 in self.part_vals: - oprot.writeString(iter773) + for iter780 in self.part_vals: + oprot.writeString(iter780) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -19843,8 +20075,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter774 in self.group_names: - oprot.writeString(iter774) + for iter781 in self.group_names: + oprot.writeString(iter781) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20273,11 +20505,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype778, _size775) = iprot.readListBegin() - for _i779 in xrange(_size775): - _elem780 = Partition() - _elem780.read(iprot) - self.success.append(_elem780) + (_etype785, _size782) = iprot.readListBegin() + for _i786 in xrange(_size782): + _elem787 = Partition() + _elem787.read(iprot) + self.success.append(_elem787) iprot.readListEnd() else: iprot.skip(ftype) @@ -20306,8 +20538,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) 
oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter781 in self.success: - iter781.write(oprot) + for iter788 in self.success: + iter788.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20401,10 +20633,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype785, _size782) = iprot.readListBegin() - for _i786 in xrange(_size782): - _elem787 = iprot.readString() - self.group_names.append(_elem787) + (_etype792, _size789) = iprot.readListBegin() + for _i793 in xrange(_size789): + _elem794 = iprot.readString() + self.group_names.append(_elem794) iprot.readListEnd() else: iprot.skip(ftype) @@ -20437,8 +20669,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter788 in self.group_names: - oprot.writeString(iter788) + for iter795 in self.group_names: + oprot.writeString(iter795) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20499,11 +20731,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype792, _size789) = iprot.readListBegin() - for _i793 in xrange(_size789): - _elem794 = Partition() - _elem794.read(iprot) - self.success.append(_elem794) + (_etype799, _size796) = iprot.readListBegin() + for _i800 in xrange(_size796): + _elem801 = Partition() + _elem801.read(iprot) + self.success.append(_elem801) iprot.readListEnd() else: iprot.skip(ftype) @@ -20532,8 +20764,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter795 in self.success: - iter795.write(oprot) + for iter802 in self.success: + iter802.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20691,11 +20923,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - 
(_etype799, _size796) = iprot.readListBegin() - for _i800 in xrange(_size796): - _elem801 = PartitionSpec() - _elem801.read(iprot) - self.success.append(_elem801) + (_etype806, _size803) = iprot.readListBegin() + for _i807 in xrange(_size803): + _elem808 = PartitionSpec() + _elem808.read(iprot) + self.success.append(_elem808) iprot.readListEnd() else: iprot.skip(ftype) @@ -20724,8 +20956,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter802 in self.success: - iter802.write(oprot) + for iter809 in self.success: + iter809.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20880,10 +21112,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype806, _size803) = iprot.readListBegin() - for _i807 in xrange(_size803): - _elem808 = iprot.readString() - self.success.append(_elem808) + (_etype813, _size810) = iprot.readListBegin() + for _i814 in xrange(_size810): + _elem815 = iprot.readString() + self.success.append(_elem815) iprot.readListEnd() else: iprot.skip(ftype) @@ -20906,8 +21138,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter809 in self.success: - oprot.writeString(iter809) + for iter816 in self.success: + oprot.writeString(iter816) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -20983,10 +21215,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype813, _size810) = iprot.readListBegin() - for _i814 in xrange(_size810): - _elem815 = iprot.readString() - self.part_vals.append(_elem815) + (_etype820, _size817) = iprot.readListBegin() + for _i821 in xrange(_size817): + _elem822 = iprot.readString() + self.part_vals.append(_elem822) iprot.readListEnd() else: iprot.skip(ftype) @@ -21016,8 +21248,8 @@ 
def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter816 in self.part_vals: - oprot.writeString(iter816) + for iter823 in self.part_vals: + oprot.writeString(iter823) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21081,11 +21313,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype820, _size817) = iprot.readListBegin() - for _i821 in xrange(_size817): - _elem822 = Partition() - _elem822.read(iprot) - self.success.append(_elem822) + (_etype827, _size824) = iprot.readListBegin() + for _i828 in xrange(_size824): + _elem829 = Partition() + _elem829.read(iprot) + self.success.append(_elem829) iprot.readListEnd() else: iprot.skip(ftype) @@ -21114,8 +21346,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter823 in self.success: - iter823.write(oprot) + for iter830 in self.success: + iter830.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21202,10 +21434,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype827, _size824) = iprot.readListBegin() - for _i828 in xrange(_size824): - _elem829 = iprot.readString() - self.part_vals.append(_elem829) + (_etype834, _size831) = iprot.readListBegin() + for _i835 in xrange(_size831): + _elem836 = iprot.readString() + self.part_vals.append(_elem836) iprot.readListEnd() else: iprot.skip(ftype) @@ -21222,10 +21454,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype833, _size830) = iprot.readListBegin() - for _i834 in xrange(_size830): - _elem835 = iprot.readString() - self.group_names.append(_elem835) + (_etype840, _size837) = iprot.readListBegin() + for _i841 in xrange(_size837): + _elem842 = 
iprot.readString() + self.group_names.append(_elem842) iprot.readListEnd() else: iprot.skip(ftype) @@ -21250,8 +21482,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter836 in self.part_vals: - oprot.writeString(iter836) + for iter843 in self.part_vals: + oprot.writeString(iter843) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21265,8 +21497,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter837 in self.group_names: - oprot.writeString(iter837) + for iter844 in self.group_names: + oprot.writeString(iter844) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21328,11 +21560,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype841, _size838) = iprot.readListBegin() - for _i842 in xrange(_size838): - _elem843 = Partition() - _elem843.read(iprot) - self.success.append(_elem843) + (_etype848, _size845) = iprot.readListBegin() + for _i849 in xrange(_size845): + _elem850 = Partition() + _elem850.read(iprot) + self.success.append(_elem850) iprot.readListEnd() else: iprot.skip(ftype) @@ -21361,8 +21593,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter844 in self.success: - iter844.write(oprot) + for iter851 in self.success: + iter851.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21443,10 +21675,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype848, _size845) = iprot.readListBegin() - for _i849 in xrange(_size845): - _elem850 = iprot.readString() - self.part_vals.append(_elem850) + (_etype855, _size852) = iprot.readListBegin() + for 
_i856 in xrange(_size852): + _elem857 = iprot.readString() + self.part_vals.append(_elem857) iprot.readListEnd() else: iprot.skip(ftype) @@ -21476,8 +21708,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter851 in self.part_vals: - oprot.writeString(iter851) + for iter858 in self.part_vals: + oprot.writeString(iter858) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -21541,10 +21773,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype855, _size852) = iprot.readListBegin() - for _i856 in xrange(_size852): - _elem857 = iprot.readString() - self.success.append(_elem857) + (_etype862, _size859) = iprot.readListBegin() + for _i863 in xrange(_size859): + _elem864 = iprot.readString() + self.success.append(_elem864) iprot.readListEnd() else: iprot.skip(ftype) @@ -21573,8 +21805,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter858 in self.success: - oprot.writeString(iter858) + for iter865 in self.success: + oprot.writeString(iter865) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21745,11 +21977,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype862, _size859) = iprot.readListBegin() - for _i863 in xrange(_size859): - _elem864 = Partition() - _elem864.read(iprot) - self.success.append(_elem864) + (_etype869, _size866) = iprot.readListBegin() + for _i870 in xrange(_size866): + _elem871 = Partition() + _elem871.read(iprot) + self.success.append(_elem871) iprot.readListEnd() else: iprot.skip(ftype) @@ -21778,8 +22010,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter865 in 
self.success: - iter865.write(oprot) + for iter872 in self.success: + iter872.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21950,11 +22182,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype869, _size866) = iprot.readListBegin() - for _i870 in xrange(_size866): - _elem871 = PartitionSpec() - _elem871.read(iprot) - self.success.append(_elem871) + (_etype876, _size873) = iprot.readListBegin() + for _i877 in xrange(_size873): + _elem878 = PartitionSpec() + _elem878.read(iprot) + self.success.append(_elem878) iprot.readListEnd() else: iprot.skip(ftype) @@ -21983,8 +22215,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter872 in self.success: - iter872.write(oprot) + for iter879 in self.success: + iter879.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22404,10 +22636,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype876, _size873) = iprot.readListBegin() - for _i877 in xrange(_size873): - _elem878 = iprot.readString() - self.names.append(_elem878) + (_etype883, _size880) = iprot.readListBegin() + for _i884 in xrange(_size880): + _elem885 = iprot.readString() + self.names.append(_elem885) iprot.readListEnd() else: iprot.skip(ftype) @@ -22432,8 +22664,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter879 in self.names: - oprot.writeString(iter879) + for iter886 in self.names: + oprot.writeString(iter886) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22492,11 +22724,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype883, _size880) = iprot.readListBegin() - for _i884 in xrange(_size880): - _elem885 = Partition() - 
_elem885.read(iprot) - self.success.append(_elem885) + (_etype890, _size887) = iprot.readListBegin() + for _i891 in xrange(_size887): + _elem892 = Partition() + _elem892.read(iprot) + self.success.append(_elem892) iprot.readListEnd() else: iprot.skip(ftype) @@ -22525,8 +22757,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter886 in self.success: - iter886.write(oprot) + for iter893 in self.success: + iter893.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22776,11 +23008,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype890, _size887) = iprot.readListBegin() - for _i891 in xrange(_size887): - _elem892 = Partition() - _elem892.read(iprot) - self.new_parts.append(_elem892) + (_etype897, _size894) = iprot.readListBegin() + for _i898 in xrange(_size894): + _elem899 = Partition() + _elem899.read(iprot) + self.new_parts.append(_elem899) iprot.readListEnd() else: iprot.skip(ftype) @@ -22805,8 +23037,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter893 in self.new_parts: - iter893.write(oprot) + for iter900 in self.new_parts: + iter900.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22959,11 +23191,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype897, _size894) = iprot.readListBegin() - for _i898 in xrange(_size894): - _elem899 = Partition() - _elem899.read(iprot) - self.new_parts.append(_elem899) + (_etype904, _size901) = iprot.readListBegin() + for _i905 in xrange(_size901): + _elem906 = Partition() + _elem906.read(iprot) + self.new_parts.append(_elem906) iprot.readListEnd() else: iprot.skip(ftype) @@ -22994,8 +23226,8 @@ def write(self, oprot): if self.new_parts is 
not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter900 in self.new_parts: - iter900.write(oprot) + for iter907 in self.new_parts: + iter907.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -23339,10 +23571,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype904, _size901) = iprot.readListBegin() - for _i905 in xrange(_size901): - _elem906 = iprot.readString() - self.part_vals.append(_elem906) + (_etype911, _size908) = iprot.readListBegin() + for _i912 in xrange(_size908): + _elem913 = iprot.readString() + self.part_vals.append(_elem913) iprot.readListEnd() else: iprot.skip(ftype) @@ -23373,8 +23605,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter907 in self.part_vals: - oprot.writeString(iter907) + for iter914 in self.part_vals: + oprot.writeString(iter914) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -23516,10 +23748,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype911, _size908) = iprot.readListBegin() - for _i912 in xrange(_size908): - _elem913 = iprot.readString() - self.part_vals.append(_elem913) + (_etype918, _size915) = iprot.readListBegin() + for _i919 in xrange(_size915): + _elem920 = iprot.readString() + self.part_vals.append(_elem920) iprot.readListEnd() else: iprot.skip(ftype) @@ -23541,8 +23773,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter914 in self.part_vals: - oprot.writeString(iter914) + for iter921 in self.part_vals: + oprot.writeString(iter921) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -23900,10 +24132,10 @@ 
def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype918, _size915) = iprot.readListBegin() - for _i919 in xrange(_size915): - _elem920 = iprot.readString() - self.success.append(_elem920) + (_etype925, _size922) = iprot.readListBegin() + for _i926 in xrange(_size922): + _elem927 = iprot.readString() + self.success.append(_elem927) iprot.readListEnd() else: iprot.skip(ftype) @@ -23926,8 +24158,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter921 in self.success: - oprot.writeString(iter921) + for iter928 in self.success: + oprot.writeString(iter928) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24051,11 +24283,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype923, _vtype924, _size922 ) = iprot.readMapBegin() - for _i926 in xrange(_size922): - _key927 = iprot.readString() - _val928 = iprot.readString() - self.success[_key927] = _val928 + (_ktype930, _vtype931, _size929 ) = iprot.readMapBegin() + for _i933 in xrange(_size929): + _key934 = iprot.readString() + _val935 = iprot.readString() + self.success[_key934] = _val935 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24078,9 +24310,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter929,viter930 in self.success.items(): - oprot.writeString(kiter929) - oprot.writeString(viter930) + for kiter936,viter937 in self.success.items(): + oprot.writeString(kiter936) + oprot.writeString(viter937) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24156,11 +24388,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype932, _vtype933, _size931 ) = iprot.readMapBegin() - for _i935 in xrange(_size931): - _key936 = 
iprot.readString() - _val937 = iprot.readString() - self.part_vals[_key936] = _val937 + (_ktype939, _vtype940, _size938 ) = iprot.readMapBegin() + for _i942 in xrange(_size938): + _key943 = iprot.readString() + _val944 = iprot.readString() + self.part_vals[_key943] = _val944 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24190,9 +24422,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter938,viter939 in self.part_vals.items(): - oprot.writeString(kiter938) - oprot.writeString(viter939) + for kiter945,viter946 in self.part_vals.items(): + oprot.writeString(kiter945) + oprot.writeString(viter946) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -24406,11 +24638,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype941, _vtype942, _size940 ) = iprot.readMapBegin() - for _i944 in xrange(_size940): - _key945 = iprot.readString() - _val946 = iprot.readString() - self.part_vals[_key945] = _val946 + (_ktype948, _vtype949, _size947 ) = iprot.readMapBegin() + for _i951 in xrange(_size947): + _key952 = iprot.readString() + _val953 = iprot.readString() + self.part_vals[_key952] = _val953 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24440,9 +24672,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter947,viter948 in self.part_vals.items(): - oprot.writeString(kiter947) - oprot.writeString(viter948) + for kiter954,viter955 in self.part_vals.items(): + oprot.writeString(kiter954) + oprot.writeString(viter955) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -25497,11 +25729,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype952, _size949) = iprot.readListBegin() - for _i953 in 
xrange(_size949): - _elem954 = Index() - _elem954.read(iprot) - self.success.append(_elem954) + (_etype959, _size956) = iprot.readListBegin() + for _i960 in xrange(_size956): + _elem961 = Index() + _elem961.read(iprot) + self.success.append(_elem961) iprot.readListEnd() else: iprot.skip(ftype) @@ -25530,8 +25762,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter955 in self.success: - iter955.write(oprot) + for iter962 in self.success: + iter962.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25686,10 +25918,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype959, _size956) = iprot.readListBegin() - for _i960 in xrange(_size956): - _elem961 = iprot.readString() - self.success.append(_elem961) + (_etype966, _size963) = iprot.readListBegin() + for _i967 in xrange(_size963): + _elem968 = iprot.readString() + self.success.append(_elem968) iprot.readListEnd() else: iprot.skip(ftype) @@ -25712,8 +25944,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter962 in self.success: - oprot.writeString(iter962) + for iter969 in self.success: + oprot.writeString(iter969) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -28579,10 +28811,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype966, _size963) = iprot.readListBegin() - for _i967 in xrange(_size963): - _elem968 = iprot.readString() - self.success.append(_elem968) + (_etype973, _size970) = iprot.readListBegin() + for _i974 in xrange(_size970): + _elem975 = iprot.readString() + self.success.append(_elem975) iprot.readListEnd() else: iprot.skip(ftype) @@ -28605,8 +28837,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter969 in self.success: - oprot.writeString(iter969) + for iter976 in self.success: + oprot.writeString(iter976) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29294,10 +29526,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype973, _size970) = iprot.readListBegin() - for _i974 in xrange(_size970): - _elem975 = iprot.readString() - self.success.append(_elem975) + (_etype980, _size977) = iprot.readListBegin() + for _i981 in xrange(_size977): + _elem982 = iprot.readString() + self.success.append(_elem982) iprot.readListEnd() else: iprot.skip(ftype) @@ -29320,8 +29552,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter976 in self.success: - oprot.writeString(iter976) + for iter983 in self.success: + oprot.writeString(iter983) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29835,11 +30067,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype980, _size977) = iprot.readListBegin() - for _i981 in xrange(_size977): - _elem982 = Role() - _elem982.read(iprot) - self.success.append(_elem982) + (_etype987, _size984) = iprot.readListBegin() + for _i988 in xrange(_size984): + _elem989 = Role() + _elem989.read(iprot) + self.success.append(_elem989) iprot.readListEnd() else: iprot.skip(ftype) @@ -29862,8 +30094,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter983 in self.success: - iter983.write(oprot) + for iter990 in self.success: + iter990.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30372,10 +30604,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: 
self.group_names = [] - (_etype987, _size984) = iprot.readListBegin() - for _i988 in xrange(_size984): - _elem989 = iprot.readString() - self.group_names.append(_elem989) + (_etype994, _size991) = iprot.readListBegin() + for _i995 in xrange(_size991): + _elem996 = iprot.readString() + self.group_names.append(_elem996) iprot.readListEnd() else: iprot.skip(ftype) @@ -30400,8 +30632,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter990 in self.group_names: - oprot.writeString(iter990) + for iter997 in self.group_names: + oprot.writeString(iter997) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -30628,11 +30860,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype994, _size991) = iprot.readListBegin() - for _i995 in xrange(_size991): - _elem996 = HiveObjectPrivilege() - _elem996.read(iprot) - self.success.append(_elem996) + (_etype1001, _size998) = iprot.readListBegin() + for _i1002 in xrange(_size998): + _elem1003 = HiveObjectPrivilege() + _elem1003.read(iprot) + self.success.append(_elem1003) iprot.readListEnd() else: iprot.skip(ftype) @@ -30655,8 +30887,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter997 in self.success: - iter997.write(oprot) + for iter1004 in self.success: + iter1004.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -31154,10 +31386,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1001, _size998) = iprot.readListBegin() - for _i1002 in xrange(_size998): - _elem1003 = iprot.readString() - self.group_names.append(_elem1003) + (_etype1008, _size1005) = iprot.readListBegin() + for _i1009 in xrange(_size1005): + _elem1010 = iprot.readString() + 
self.group_names.append(_elem1010) iprot.readListEnd() else: iprot.skip(ftype) @@ -31178,8 +31410,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1004 in self.group_names: - oprot.writeString(iter1004) + for iter1011 in self.group_names: + oprot.writeString(iter1011) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31234,10 +31466,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1008, _size1005) = iprot.readListBegin() - for _i1009 in xrange(_size1005): - _elem1010 = iprot.readString() - self.success.append(_elem1010) + (_etype1015, _size1012) = iprot.readListBegin() + for _i1016 in xrange(_size1012): + _elem1017 = iprot.readString() + self.success.append(_elem1017) iprot.readListEnd() else: iprot.skip(ftype) @@ -31260,8 +31492,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1011 in self.success: - oprot.writeString(iter1011) + for iter1018 in self.success: + oprot.writeString(iter1018) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -32193,10 +32425,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1015, _size1012) = iprot.readListBegin() - for _i1016 in xrange(_size1012): - _elem1017 = iprot.readString() - self.success.append(_elem1017) + (_etype1022, _size1019) = iprot.readListBegin() + for _i1023 in xrange(_size1019): + _elem1024 = iprot.readString() + self.success.append(_elem1024) iprot.readListEnd() else: iprot.skip(ftype) @@ -32213,8 +32445,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1018 in self.success: - oprot.writeString(iter1018) + for iter1025 in 
self.success: + oprot.writeString(iter1025) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -32741,10 +32973,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1022, _size1019) = iprot.readListBegin() - for _i1023 in xrange(_size1019): - _elem1024 = iprot.readString() - self.success.append(_elem1024) + (_etype1029, _size1026) = iprot.readListBegin() + for _i1030 in xrange(_size1026): + _elem1031 = iprot.readString() + self.success.append(_elem1031) iprot.readListEnd() else: iprot.skip(ftype) @@ -32761,8 +32993,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1025 in self.success: - oprot.writeString(iter1025) + for iter1032 in self.success: + oprot.writeString(iter1032) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34629,14 +34861,533 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class show_compact_result: +class show_compact_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ShowCompactResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('show_compact_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_dynamic_partitions_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = AddDynamicPartitions() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_dynamic_partitions_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class add_dynamic_partitions_result: + """ + Attributes: + - o1 + - o2 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 + ) + + def __init__(self, o1=None, o2=None,): + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchTxnException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = TxnAbortedException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('add_dynamic_partitions_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_next_notification_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = NotificationEventRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_next_notification_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_next_notification_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = NotificationEventResponse() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_next_notification_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_current_notificationEventId_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_current_notificationEventId_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def 
__eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_current_notificationEventId_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = CurrentNotificationEventId() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_current_notificationEventId_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + 
+ def __ne__(self, other): + return not (self == other) + +class fire_listener_event_args: + """ + Attributes: + - rqst + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, rqst=None,): + self.rqst = rqst + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.rqst = FireEventRequest() + self.rqst.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fire_listener_event_args') + if self.rqst is not None: + oprot.writeFieldBegin('rqst', TType.STRUCT, 1) + self.rqst.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.rqst) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fire_listener_event_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', 
(ShowCompactResponse, ShowCompactResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34653,7 +35404,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ShowCompactResponse() + self.success = FireEventResponse() self.success.read(iprot) else: iprot.skip(ftype) @@ -34666,7 +35417,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('show_compact_result') + oprot.writeStructBegin('fire_listener_event_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -34694,20 +35445,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_args: - """ - Attributes: - - rqst - """ +class flushCache_args: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'rqst', (AddDynamicPartitions, AddDynamicPartitions.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34717,12 +35459,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.rqst = AddDynamicPartitions() - self.rqst.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34732,11 +35468,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec 
is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('flushCache_args') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34746,7 +35478,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) return value def __repr__(self): @@ -34760,23 +35491,11 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class add_dynamic_partitions_result: - """ - Attributes: - - o1 - - o2 - """ +class flushCache_result: thrift_spec = ( - None, # 0 - (1, TType.STRUCT, 'o1', (NoSuchTxnException, NoSuchTxnException.thrift_spec), None, ), # 1 - (2, TType.STRUCT, 'o2', (TxnAbortedException, TxnAbortedException.thrift_spec), None, ), # 2 ) - def __init__(self, o1=None, o2=None,): - self.o1 = o1 - self.o2 = o2 - def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34786,18 +35505,6 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break - if fid == 1: - if ftype == TType.STRUCT: - self.o1 = NoSuchTxnException() - self.o1.read(iprot) - else: - iprot.skip(ftype) - elif fid == 2: - if ftype == TType.STRUCT: - self.o2 = TxnAbortedException() - self.o2.read(iprot) - else: - iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34807,15 +35514,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, 
(self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('add_dynamic_partitions_result') - if self.o1 is not None: - oprot.writeFieldBegin('o1', TType.STRUCT, 1) - self.o1.write(oprot) - oprot.writeFieldEnd() - if self.o2 is not None: - oprot.writeFieldBegin('o2', TType.STRUCT, 2) - self.o2.write(oprot) - oprot.writeFieldEnd() + oprot.writeStructBegin('flushCache_result') oprot.writeFieldStop() oprot.writeStructEnd() @@ -34825,8 +35524,6 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.o1) - value = (value * 31) ^ hash(self.o2) return value def __repr__(self): @@ -34840,19 +35537,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_args: +class get_file_metadata_by_expr_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (NotificationEventRequest, NotificationEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -34865,8 +35562,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = NotificationEventRequest() - self.rqst.read(iprot) + self.req = GetFileMetadataByExprRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -34878,10 +35575,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_args') - if self.rqst is not None: 
- oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - self.rqst.write(oprot) + oprot.writeStructBegin('get_file_metadata_by_expr_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -34892,7 +35589,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -34906,14 +35603,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_next_notification_result: +class get_file_metadata_by_expr_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (NotificationEventResponse, NotificationEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -34930,7 +35627,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = NotificationEventResponse() + self.success = GetFileMetadataByExprResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -34943,7 +35640,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_next_notification_result') + oprot.writeStructBegin('get_file_metadata_by_expr_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -34971,11 +35668,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_current_notificationEventId_args: +class get_file_metadata_args: + """ + Attributes: + - req + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (GetFileMetadataRequest, 
GetFileMetadataRequest.thrift_spec), None, ), # 1 ) + def __init__(self, req=None,): + self.req = req + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -34985,6 +35691,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.req = GetFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -34994,7 +35706,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_current_notificationEventId_args') + oprot.writeStructBegin('get_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35004,6 +35720,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -35017,14 +35734,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_current_notificationEventId_result: +class get_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CurrentNotificationEventId, CurrentNotificationEventId.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35041,7 +35758,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = 
CurrentNotificationEventId() + self.success = GetFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35054,7 +35771,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_current_notificationEventId_result') + oprot.writeStructBegin('get_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35082,19 +35799,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_args: +class put_file_metadata_args: """ Attributes: - - rqst + - req """ thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'rqst', (FireEventRequest, FireEventRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 ) - def __init__(self, rqst=None,): - self.rqst = rqst + def __init__(self, req=None,): + self.req = req def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -35107,8 +35824,8 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.rqst = FireEventRequest() - self.rqst.read(iprot) + self.req = PutFileMetadataRequest() + self.req.read(iprot) else: iprot.skip(ftype) else: @@ -35120,10 +35837,10 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_args') - if self.rqst is not None: - oprot.writeFieldBegin('rqst', TType.STRUCT, 1) - 
self.rqst.write(oprot) + oprot.writeStructBegin('put_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35134,7 +35851,7 @@ def validate(self): def __hash__(self): value = 17 - value = (value * 31) ^ hash(self.rqst) + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -35148,14 +35865,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class fire_listener_event_result: +class put_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (FireEventResponse, FireEventResponse.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35172,7 +35889,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = FireEventResponse() + self.success = PutFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35185,7 +35902,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('fire_listener_event_result') + oprot.writeStructBegin('put_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35213,11 +35930,20 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_args: +class clear_file_metadata_args: + """ + Attributes: + - req + """ thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 ) + def __init__(self, req=None,): + self.req = req + def read(self, iprot): if 
iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35227,6 +35953,12 @@ def read(self, iprot): (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 1: + if ftype == TType.STRUCT: + self.req = ClearFileMetadataRequest() + self.req.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35236,7 +35968,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_args') + oprot.writeStructBegin('clear_file_metadata_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35246,6 +35982,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.req) return value def __repr__(self): @@ -35259,11 +35996,19 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class flushCache_result: +class clear_file_metadata_result: + """ + Attributes: + - success + """ thrift_spec = ( + (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 ) + def __init__(self, success=None,): + self.success = success + def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) @@ -35273,6 +36018,12 @@ def read(self, iprot): (fname, ftype, fid) = 
iprot.readFieldBegin() if ftype == TType.STOP: break + if fid == 0: + if ftype == TType.STRUCT: + self.success = ClearFileMetadataResult() + self.success.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -35282,7 +36033,11 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('flushCache_result') + oprot.writeStructBegin('clear_file_metadata_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -35292,6 +36047,7 @@ def validate(self): def __hash__(self): value = 17 + value = (value * 31) ^ hash(self.success) return value def __repr__(self): @@ -35305,7 +36061,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_args: +class cache_file_metadata_args: """ Attributes: - req @@ -35313,7 +36069,7 @@ class get_file_metadata_by_expr_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (GetFileMetadataByExprRequest, GetFileMetadataByExprRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): @@ -35330,7 +36086,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = GetFileMetadataByExprRequest() + self.req = CacheFileMetadataRequest() self.req.read(iprot) else: iprot.skip(ftype) @@ -35343,7 +36099,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - 
oprot.writeStructBegin('get_file_metadata_by_expr_args') + oprot.writeStructBegin('cache_file_metadata_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) @@ -35371,14 +36127,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_by_expr_result: +class cache_file_metadata_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataByExprResult, GetFileMetadataByExprResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35395,7 +36151,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetFileMetadataByExprResult() + self.success = CacheFileMetadataResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35408,7 +36164,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_by_expr_result') + oprot.writeStructBegin('cache_file_metadata_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35436,7 +36192,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_args: +class get_next_write_id_args: """ Attributes: - req @@ -35444,7 +36200,7 @@ class get_file_metadata_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (GetFileMetadataRequest, GetFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetNextWriteIdRequest, GetNextWriteIdRequest.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): @@ -35461,7 +36217,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: 
- self.req = GetFileMetadataRequest() + self.req = GetNextWriteIdRequest() self.req.read(iprot) else: iprot.skip(ftype) @@ -35474,7 +36230,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_args') + oprot.writeStructBegin('get_next_write_id_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) @@ -35502,14 +36258,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class get_file_metadata_result: +class get_next_write_id_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (GetFileMetadataResult, GetFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (GetNextWriteIdResult, GetNextWriteIdResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35526,7 +36282,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = GetFileMetadataResult() + self.success = GetNextWriteIdResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35539,7 +36295,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('get_file_metadata_result') + oprot.writeStructBegin('get_next_write_id_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35567,7 +36323,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_args: +class finalize_write_id_args: """ Attributes: - req @@ -35575,7 +36331,7 @@ class put_file_metadata_args: thrift_spec = ( 
None, # 0 - (1, TType.STRUCT, 'req', (PutFileMetadataRequest, PutFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (FinalizeWriteIdRequest, FinalizeWriteIdRequest.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): @@ -35592,7 +36348,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = PutFileMetadataRequest() + self.req = FinalizeWriteIdRequest() self.req.read(iprot) else: iprot.skip(ftype) @@ -35605,7 +36361,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_args') + oprot.writeStructBegin('finalize_write_id_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) @@ -35633,14 +36389,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class put_file_metadata_result: +class finalize_write_id_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (PutFileMetadataResult, PutFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (FinalizeWriteIdResult, FinalizeWriteIdResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35657,7 +36413,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = PutFileMetadataResult() + self.success = FinalizeWriteIdResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35670,7 +36426,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('put_file_metadata_result') + oprot.writeStructBegin('finalize_write_id_result') if self.success is 
not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35698,7 +36454,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_args: +class heartbeat_write_id_args: """ Attributes: - req @@ -35706,7 +36462,7 @@ class clear_file_metadata_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (ClearFileMetadataRequest, ClearFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (HeartbeatWriteIdRequest, HeartbeatWriteIdRequest.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): @@ -35723,7 +36479,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = ClearFileMetadataRequest() + self.req = HeartbeatWriteIdRequest() self.req.read(iprot) else: iprot.skip(ftype) @@ -35736,7 +36492,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_args') + oprot.writeStructBegin('heartbeat_write_id_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) @@ -35764,14 +36520,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class clear_file_metadata_result: +class heartbeat_write_id_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (ClearFileMetadataResult, ClearFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 'success', (HeartbeatWriteIdResult, HeartbeatWriteIdResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35788,7 +36544,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = ClearFileMetadataResult() + self.success = HeartbeatWriteIdResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35801,7 +36557,7 @@ 
def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('clear_file_metadata_result') + oprot.writeStructBegin('heartbeat_write_id_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) @@ -35829,7 +36585,7 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_args: +class get_valid_write_ids_args: """ Attributes: - req @@ -35837,7 +36593,7 @@ class cache_file_metadata_args: thrift_spec = ( None, # 0 - (1, TType.STRUCT, 'req', (CacheFileMetadataRequest, CacheFileMetadataRequest.thrift_spec), None, ), # 1 + (1, TType.STRUCT, 'req', (GetValidWriteIdsRequest, GetValidWriteIdsRequest.thrift_spec), None, ), # 1 ) def __init__(self, req=None,): @@ -35854,7 +36610,7 @@ def read(self, iprot): break if fid == 1: if ftype == TType.STRUCT: - self.req = CacheFileMetadataRequest() + self.req = GetValidWriteIdsRequest() self.req.read(iprot) else: iprot.skip(ftype) @@ -35867,7 +36623,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_args') + oprot.writeStructBegin('get_valid_write_ids_args') if self.req is not None: oprot.writeFieldBegin('req', TType.STRUCT, 1) self.req.write(oprot) @@ -35895,14 +36651,14 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class cache_file_metadata_result: +class get_valid_write_ids_result: """ Attributes: - success """ thrift_spec = ( - (0, TType.STRUCT, 'success', (CacheFileMetadataResult, CacheFileMetadataResult.thrift_spec), None, ), # 0 + (0, TType.STRUCT, 
'success', (GetValidWriteIdsResult, GetValidWriteIdsResult.thrift_spec), None, ), # 0 ) def __init__(self, success=None,): @@ -35919,7 +36675,7 @@ def read(self, iprot): break if fid == 0: if ftype == TType.STRUCT: - self.success = CacheFileMetadataResult() + self.success = GetValidWriteIdsResult() self.success.read(iprot) else: iprot.skip(ftype) @@ -35932,7 +36688,7 @@ def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return - oprot.writeStructBegin('cache_file_metadata_result') + oprot.writeStructBegin('get_valid_write_ids_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py index 5100236afa24..6232737eb55d 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/constants.py @@ -33,3 +33,4 @@ TABLE_IS_TRANSACTIONAL = "transactional" TABLE_NO_AUTO_COMPACT = "no_auto_compaction" TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties" +TABLE_IS_MM = "hivecommit" diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 879fc99a0ce2..b90de431b46c 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -3122,6 +3122,8 @@ class Table: - tableType - privileges - temporary + - mmNextWriteId + - mmWatermarkWriteId """ thrift_spec = ( @@ -3140,9 +3142,11 @@ class Table: (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 (14, TType.BOOL, 'temporary', None, False, ), 
# 14 + (15, TType.I64, 'mmNextWriteId', None, None, ), # 15 + (16, TType.I64, 'mmWatermarkWriteId', None, None, ), # 16 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], mmNextWriteId=None, mmWatermarkWriteId=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -3157,6 +3161,8 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.tableType = tableType self.privileges = privileges self.temporary = temporary + self.mmNextWriteId = mmNextWriteId + self.mmWatermarkWriteId = mmWatermarkWriteId def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -3251,6 +3257,16 @@ def read(self, iprot): self.temporary = iprot.readBool() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.I64: + self.mmNextWriteId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.I64: + self.mmWatermarkWriteId = iprot.readI64() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -3324,6 +3340,14 @@ def write(self, oprot): oprot.writeFieldBegin('temporary', TType.BOOL, 14) oprot.writeBool(self.temporary) oprot.writeFieldEnd() + if self.mmNextWriteId is not None: + oprot.writeFieldBegin('mmNextWriteId', TType.I64, 15) + oprot.writeI64(self.mmNextWriteId) + oprot.writeFieldEnd() + if self.mmWatermarkWriteId is not None: + 
oprot.writeFieldBegin('mmWatermarkWriteId', TType.I64, 16) + oprot.writeI64(self.mmWatermarkWriteId) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -3347,6 +3371,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.temporary) + value = (value * 31) ^ hash(self.mmNextWriteId) + value = (value * 31) ^ hash(self.mmWatermarkWriteId) return value def __repr__(self): @@ -12230,19 +12256,22 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) -class GetAllFunctionsResponse: +class GetNextWriteIdRequest: """ Attributes: - - functions + - dbName + - tblName """ thrift_spec = ( None, # 0 - (1, TType.LIST, 'functions', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 ) - def __init__(self, functions=None,): - self.functions = functions + def __init__(self, dbName=None, tblName=None,): + self.dbName = dbName + self.tblName = tblName def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -12254,13 +12283,658 @@ def read(self, iprot): if ftype == TType.STOP: break if fid == 1: - if ftype == TType.LIST: - self.functions = [] - (_etype562, _size559) = iprot.readListBegin() - for _i563 in xrange(_size559): - _elem564 = Function() - _elem564.read(iprot) - self.functions.append(_elem564) + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is 
not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetNextWriteIdRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetNextWriteIdResult: + """ + Attributes: + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'writeId', None, None, ), # 1 + ) + + def __init__(self, writeId=None,): + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) 
+ iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetNextWriteIdResult') + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 1) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class FinalizeWriteIdRequest: + """ + Attributes: + - dbName + - tblName + - writeId + - commit + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + (3, TType.I64, 'writeId', None, None, ), # 3 + (4, TType.BOOL, 'commit', None, None, ), # 4 + ) + + def __init__(self, dbName=None, tblName=None, writeId=None, commit=None,): + self.dbName = dbName + self.tblName = tblName + self.writeId = writeId + self.commit = commit + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = 
iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.BOOL: + self.commit = iprot.readBool() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('FinalizeWriteIdRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + if self.commit is not None: + oprot.writeFieldBegin('commit', TType.BOOL, 4) + oprot.writeBool(self.commit) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + if self.commit is None: + raise TProtocol.TProtocolException(message='Required field commit is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) 
+ value = (value * 31) ^ hash(self.tblName) + value = (value * 31) ^ hash(self.writeId) + value = (value * 31) ^ hash(self.commit) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class FinalizeWriteIdResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('FinalizeWriteIdResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class HeartbeatWriteIdRequest: + """ + Attributes: + - dbName + - tblName + - writeId + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 
'tblName', None, None, ), # 2 + (3, TType.I64, 'writeId', None, None, ), # 3 + ) + + def __init__(self, dbName=None, tblName=None, writeId=None,): + self.dbName = dbName + self.tblName = tblName + self.writeId = writeId + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I64: + self.writeId = iprot.readI64() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HeartbeatWriteIdRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + if self.writeId is not None: + oprot.writeFieldBegin('writeId', TType.I64, 3) + oprot.writeI64(self.writeId) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise 
TProtocol.TProtocolException(message='Required field tblName is unset!') + if self.writeId is None: + raise TProtocol.TProtocolException(message='Required field writeId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + value = (value * 31) ^ hash(self.writeId) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class HeartbeatWriteIdResult: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HeartbeatWriteIdResult') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): 
+ return not (self == other) + +class GetValidWriteIdsRequest: + """ + Attributes: + - dbName + - tblName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'dbName', None, None, ), # 1 + (2, TType.STRING, 'tblName', None, None, ), # 2 + ) + + def __init__(self, dbName=None, tblName=None,): + self.dbName = dbName + self.tblName = tblName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.dbName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tblName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetValidWriteIdsRequest') + if self.dbName is not None: + oprot.writeFieldBegin('dbName', TType.STRING, 1) + oprot.writeString(self.dbName) + oprot.writeFieldEnd() + if self.tblName is not None: + oprot.writeFieldBegin('tblName', TType.STRING, 2) + oprot.writeString(self.tblName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.dbName is None: + raise TProtocol.TProtocolException(message='Required field dbName is unset!') + if self.tblName is None: + raise TProtocol.TProtocolException(message='Required field tblName is unset!') + return + + + def __hash__(self): + value = 17 + value = (value 
* 31) ^ hash(self.dbName) + value = (value * 31) ^ hash(self.tblName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetValidWriteIdsResult: + """ + Attributes: + - lowWatermarkId + - highWatermarkId + - areIdsValid + - ids + """ + + thrift_spec = ( + None, # 0 + (1, TType.I64, 'lowWatermarkId', None, None, ), # 1 + (2, TType.I64, 'highWatermarkId', None, None, ), # 2 + (3, TType.BOOL, 'areIdsValid', None, None, ), # 3 + (4, TType.LIST, 'ids', (TType.I64,None), None, ), # 4 + ) + + def __init__(self, lowWatermarkId=None, highWatermarkId=None, areIdsValid=None, ids=None,): + self.lowWatermarkId = lowWatermarkId + self.highWatermarkId = highWatermarkId + self.areIdsValid = areIdsValid + self.ids = ids + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I64: + self.lowWatermarkId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.highWatermarkId = iprot.readI64() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.BOOL: + self.areIdsValid = iprot.readBool() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.LIST: + self.ids = [] + (_etype562, _size559) = iprot.readListBegin() + for _i563 in xrange(_size559): + _elem564 = iprot.readI64() + self.ids.append(_elem564) + iprot.readListEnd() + else: + 
iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetValidWriteIdsResult') + if self.lowWatermarkId is not None: + oprot.writeFieldBegin('lowWatermarkId', TType.I64, 1) + oprot.writeI64(self.lowWatermarkId) + oprot.writeFieldEnd() + if self.highWatermarkId is not None: + oprot.writeFieldBegin('highWatermarkId', TType.I64, 2) + oprot.writeI64(self.highWatermarkId) + oprot.writeFieldEnd() + if self.areIdsValid is not None: + oprot.writeFieldBegin('areIdsValid', TType.BOOL, 3) + oprot.writeBool(self.areIdsValid) + oprot.writeFieldEnd() + if self.ids is not None: + oprot.writeFieldBegin('ids', TType.LIST, 4) + oprot.writeListBegin(TType.I64, len(self.ids)) + for iter565 in self.ids: + oprot.writeI64(iter565) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.lowWatermarkId is None: + raise TProtocol.TProtocolException(message='Required field lowWatermarkId is unset!') + if self.highWatermarkId is None: + raise TProtocol.TProtocolException(message='Required field highWatermarkId is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.lowWatermarkId) + value = (value * 31) ^ hash(self.highWatermarkId) + value = (value * 31) ^ hash(self.areIdsValid) + value = (value * 31) ^ hash(self.ids) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class 
GetAllFunctionsResponse: + """ + Attributes: + - functions + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'functions', (TType.STRUCT,(Function, Function.thrift_spec)), None, ), # 1 + ) + + def __init__(self, functions=None,): + self.functions = functions + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.functions = [] + (_etype569, _size566) = iprot.readListBegin() + for _i570 in xrange(_size566): + _elem571 = Function() + _elem571.read(iprot) + self.functions.append(_elem571) iprot.readListEnd() else: iprot.skip(ftype) @@ -12277,8 +12951,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter565 in self.functions: - iter565.write(oprot) + for iter572 in self.functions: + iter572.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb index 6aa7143c76b0..118a54edd179 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_constants.rb @@ -55,3 +55,5 @@ TABLE_TRANSACTIONAL_PROPERTIES = %q"transactional_properties" +TABLE_IS_MM = %q"hivecommit" + diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index cf644239af5f..9cfcd648aec9 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ 
b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -728,6 +728,8 @@ class Table TABLETYPE = 12 PRIVILEGES = 13 TEMPORARY = 14 + MMNEXTWRITEID = 15 + MMWATERMARKWRITEID = 16 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -743,7 +745,9 @@ class Table VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, - TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} + TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}, + MMNEXTWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmNextWriteId', :optional => true}, + MMWATERMARKWRITEID => {:type => ::Thrift::Types::I64, :name => 'mmWatermarkWriteId', :optional => true} } def struct_fields; FIELDS; end @@ -2762,6 +2766,166 @@ def validate ::Thrift::Struct.generate_accessors self end +class GetNextWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + end + + ::Thrift::Struct.generate_accessors self +end + +class GetNextWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + WRITEID = 1 + + FIELDS = { + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class FinalizeWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + WRITEID = 3 + COMMIT = 4 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'}, + COMMIT => {:type => ::Thrift::Types::BOOL, :name => 'commit'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field commit is unset!') if @commit.nil? 
+ end + + ::Thrift::Struct.generate_accessors self +end + +class FinalizeWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class HeartbeatWriteIdRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + WRITEID = 3 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'}, + WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field writeId is unset!') unless @writeId + end + + ::Thrift::Struct.generate_accessors self +end + +class HeartbeatWriteIdResult + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class GetValidWriteIdsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + DBNAME = 1 + TBLNAME = 2 + + FIELDS = { + DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'}, + TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName + end + + ::Thrift::Struct.generate_accessors self +end + +class GetValidWriteIdsResult + include 
::Thrift::Struct, ::Thrift::Struct_Union + LOWWATERMARKID = 1 + HIGHWATERMARKID = 2 + AREIDSVALID = 3 + IDS = 4 + + FIELDS = { + LOWWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'lowWatermarkId'}, + HIGHWATERMARKID => {:type => ::Thrift::Types::I64, :name => 'highWatermarkId'}, + AREIDSVALID => {:type => ::Thrift::Types::BOOL, :name => 'areIdsValid', :optional => true}, + IDS => {:type => ::Thrift::Types::LIST, :name => 'ids', :element => {:type => ::Thrift::Types::I64}, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field lowWatermarkId is unset!') unless @lowWatermarkId + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field highWatermarkId is unset!') unless @highWatermarkId + end + + ::Thrift::Struct.generate_accessors self +end + class GetAllFunctionsResponse include ::Thrift::Struct, ::Thrift::Struct_Union FUNCTIONS = 1 diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 76a33396c806..d647182932b4 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -2500,6 +2500,66 @@ def recv_cache_file_metadata() raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'cache_file_metadata failed: unknown result') end + def get_next_write_id(req) + send_get_next_write_id(req) + return recv_get_next_write_id() + end + + def send_get_next_write_id(req) + send_message('get_next_write_id', Get_next_write_id_args, :req => req) + end + + def recv_get_next_write_id() + result = receive_message(Get_next_write_id_result) + return result.success unless result.success.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_next_write_id failed: unknown result') + end + + def finalize_write_id(req) + send_finalize_write_id(req) + return recv_finalize_write_id() + end + + def send_finalize_write_id(req) + send_message('finalize_write_id', Finalize_write_id_args, :req => req) + end + + def recv_finalize_write_id() + result = receive_message(Finalize_write_id_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'finalize_write_id failed: unknown result') + end + + def heartbeat_write_id(req) + send_heartbeat_write_id(req) + return recv_heartbeat_write_id() + end + + def send_heartbeat_write_id(req) + send_message('heartbeat_write_id', Heartbeat_write_id_args, :req => req) + end + + def recv_heartbeat_write_id() + result = receive_message(Heartbeat_write_id_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'heartbeat_write_id failed: unknown result') + end + + def get_valid_write_ids(req) + send_get_valid_write_ids(req) + return recv_get_valid_write_ids() + end + + def send_get_valid_write_ids(req) + send_message('get_valid_write_ids', Get_valid_write_ids_args, :req => req) + end + + def recv_get_valid_write_ids() + result = receive_message(Get_valid_write_ids_result) + return result.success unless result.success.nil? 
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_valid_write_ids failed: unknown result') + end + end class Processor < ::FacebookService::Processor @@ -4357,6 +4417,34 @@ def process_cache_file_metadata(seqid, iprot, oprot) write_result(result, oprot, 'cache_file_metadata', seqid) end + def process_get_next_write_id(seqid, iprot, oprot) + args = read_args(iprot, Get_next_write_id_args) + result = Get_next_write_id_result.new() + result.success = @handler.get_next_write_id(args.req) + write_result(result, oprot, 'get_next_write_id', seqid) + end + + def process_finalize_write_id(seqid, iprot, oprot) + args = read_args(iprot, Finalize_write_id_args) + result = Finalize_write_id_result.new() + result.success = @handler.finalize_write_id(args.req) + write_result(result, oprot, 'finalize_write_id', seqid) + end + + def process_heartbeat_write_id(seqid, iprot, oprot) + args = read_args(iprot, Heartbeat_write_id_args) + result = Heartbeat_write_id_result.new() + result.success = @handler.heartbeat_write_id(args.req) + write_result(result, oprot, 'heartbeat_write_id', seqid) + end + + def process_get_valid_write_ids(seqid, iprot, oprot) + args = read_args(iprot, Get_valid_write_ids_args) + result = Get_valid_write_ids_result.new() + result.success = @handler.get_valid_write_ids(args.req) + write_result(result, oprot, 'get_valid_write_ids', seqid) + end + end # HELPER FUNCTIONS AND STRUCTURES @@ -9995,5 +10083,133 @@ def validate ::Thrift::Struct.generate_accessors self end + class Get_next_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetNextWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_next_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => 
::Thrift::Types::STRUCT, :name => 'success', :class => ::GetNextWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Finalize_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::FinalizeWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Finalize_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::FinalizeWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Heartbeat_write_id_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::HeartbeatWriteIdRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Heartbeat_write_id_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::HeartbeatWriteIdResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_valid_write_ids_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::GetValidWriteIdsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_valid_write_ids_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', 
:class => ::GetValidWriteIdsResult} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + end diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 530d2f486de9..4436f3a56fab 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.*; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.events.AddIndexEvent; import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; @@ -82,6 +81,7 @@ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -121,6 +121,7 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.sql.SQLException; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.AbstractMap; @@ -136,6 +137,7 @@ import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Random; import java.util.Set; import java.util.Timer; import java.util.concurrent.Callable; @@ -183,11 +185,10 @@ protected DateFormat initialValue() { }; }; - /** - * default port on which to start the Hive server - */ public static final String ADMIN = "admin"; public static final String PUBLIC 
= "public"; + /** MM write states. */ + public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a'; private static HadoopThriftAuthBridge.Server saslServer; private static HiveDelegationTokenManager delegationTokenManager; @@ -441,19 +442,19 @@ public void init() throws MetaException { updateMetrics(); LOG.info("Finished metadata count metrics: " + initDatabaseCount + " databases, " + initTableCount + " tables, " + initPartCount + " partitions."); - metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() { @Override public Object getValue() { return initDatabaseCount; } }); - metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() { @Override public Object getValue() { return initTableCount; } }); - metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() { + metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() { @Override public Object getValue() { return initPartCount; @@ -1246,13 +1247,7 @@ public Type get_type(final String name) throws MetaException, NoSuchObjectExcept } } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_type", ret != null, ex); } @@ -1264,26 +1259,6 @@ private boolean is_type_exists(RawStore ms, String typeName) return (ms.getType(typeName) != null); } - private void drop_type_core(final RawStore ms, String typeName) - throws NoSuchObjectException, MetaException { - boolean success = false; - try { - ms.openTransaction(); - // drop any partitions - if (!is_type_exists(ms, typeName)) { - throw new NoSuchObjectException(typeName + " doesn't 
exist"); - } - if (!ms.dropType(typeName)) { - throw new MetaException("Unable to drop type " + typeName); - } - success = ms.commitTransaction(); - } finally { - if (!success) { - ms.rollbackTransaction(); - } - } - } - @Override public boolean drop_type(final String name) throws MetaException, NoSuchObjectException { startFunction("drop_type", ": " + name); @@ -1295,13 +1270,7 @@ public boolean drop_type(final String name) throws MetaException, NoSuchObjectEx success = getMS().dropType(name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("drop_type", success, ex); } @@ -1824,7 +1793,7 @@ private List dropPartitionsAndGetLocations(RawStore ms, String dbName, //No drop part listener events fired for public listeners historically, for drop table case. //Limiting to internal listeners for now, to avoid unexpected calls for public listeners. 
if (listener instanceof HMSMetricsListener) { - for (Partition part : partsToDelete) { + for (@SuppressWarnings("unused") Partition part : partsToDelete) { listener.onDropPartition(null); } } @@ -1856,13 +1825,7 @@ public void drop_table_with_environment_context(final String dbname, final Strin throw new MetaException(e.getMessage()); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("drop_table", success, ex, name); } @@ -1934,7 +1897,7 @@ public List get_table_meta(String dbnames, String tblNames, List add_partitions_core(final RawStore ms, } - partFutures.add(threadPool.submit(new Callable() { + partFutures.add(threadPool.submit(new Callable() { @Override public Partition call() throws Exception { boolean madeDir = createLocationForAddedPartition(table, part); @@ -2474,8 +2431,8 @@ private int add_partitions_pspec_core( LOG.info("Not adding partition " + part + " as it already exists"); continue; } - partFutures.add(threadPool.submit(new Callable() { - @Override public Object call() throws Exception { + partFutures.add(threadPool.submit(new Callable() { + @Override public Partition call() throws Exception { boolean madeDir = createLocationForAddedPartition(table, part); if (addedPartitions.put(new PartValEqWrapperLite(part), madeDir) != null) { // Technically, for ifNotExists case, we could insert one and discard the other @@ -2492,7 +2449,7 @@ private int add_partitions_pspec_core( try { for (Future partFuture : partFutures) { - Partition part = partFuture.get(); + partFuture.get(); } } catch (InterruptedException | ExecutionException e) { // cancel other tasks @@ -3109,13 +3066,7 @@ public Partition get_partition(final String db_name, final String tbl_name, ret = getMS().getPartition(db_name, tbl_name, part_vals); } catch (Exception e) { ex 
= e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_partition", ret != null, ex, tbl_name); } @@ -3181,13 +3132,7 @@ public List get_partitions(final String db_name, final String tbl_nam ret = getMS().getPartitions(db_name, tbl_name, max_parts); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_partitions", ret != null, ex, tbl_name); } @@ -3829,6 +3774,7 @@ public List get_fields_with_environment_context(String db, String t } } + @SuppressWarnings("deprecation") Deserializer s = MetaStoreUtils.getDeserializer(curConf, tbl, false); ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s); } catch (SerDeException e) { @@ -5800,7 +5746,7 @@ public boolean partition_name_has_valid_characters(List part_vals, throw newMetaException(e); } } - endFunction("partition_name_has_valid_characters", true, null); + endFunction("partition_name_has_valid_characters", true, ex); return ret; } @@ -6099,21 +6045,6 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( return new GetRoleGrantsForPrincipalResponse(roleMaps); } - /** - * Convert each MRoleMap object into a thrift RolePrincipalGrant object - * @param roles - * @return - */ - private List getRolePrincipalGrants(List roles) throws MetaException { - List rolePrinGrantList = new ArrayList(); - if (roles != null) { - for (Role role : roles) { - rolePrinGrantList.addAll(getMS().listRoleMembers(role.getRoleName())); - } - } - return rolePrinGrantList; - } - @Override public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, 
TException { @@ -6462,13 +6393,7 @@ public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) ret = getMS().getPrimaryKeys(db_name, tbl_name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_primary_keys", ret != null, ex, tbl_name); } @@ -6492,18 +6417,224 @@ public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws M foreign_db_name, foreign_tbl_name); } catch (Exception e) { ex = e; - if (e instanceof MetaException) { - throw (MetaException) e; - } else if (e instanceof NoSuchObjectException) { - throw (NoSuchObjectException) e; - } else { - throw newMetaException(e); - } + throwMetaException(e); } finally { endFunction("get_foreign_keys", ret != null, ex, foreign_tbl_name); } return new ForeignKeysResponse(ret); } + + private void throwMetaException(Exception e) throws MetaException, + NoSuchObjectException { + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } + + private final Random random = new Random(); + @Override + public GetNextWriteIdResult get_next_write_id(GetNextWriteIdRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + startFunction("get_next_write_id", " : db=" + dbName + " tbl=" + tblName); + Exception exception = null; + long writeId = -1; + try { + int deadlockTryCount = 10; + int deadlockRetryBackoffMs = 200; + while (deadlockTryCount > 0) { + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new NoSuchObjectException(dbName + "." + tblName); + } + writeId = tbl.isSetMmNextWriteId() ? 
tbl.getMmNextWriteId() : 0; + tbl.setMmNextWriteId(writeId + 1); + ms.alterTable(dbName, tblName, tbl); + ok = true; + } finally { + if (!ok) { + ms.rollbackTransaction(); + // Exception should propagate; don't override it by breaking out of the loop. + } else { + Boolean commitResult = ms.commitTransactionExpectDeadlock(); + if (commitResult != null) { + if (commitResult) break; // Assume no exception; ok to break out of the loop. + throw new MetaException("Failed to commit"); + } + } + } + LOG.warn("Getting the next write ID failed due to a deadlock; retrying"); + Thread.sleep(random.nextInt(deadlockRetryBackoffMs)); + } + + // Do a separate txn after we have reserved the number. + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + ms.createTableWrite(tbl, writeId, MM_WRITE_OPEN, System.currentTimeMillis()); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + exception = e; + throwMetaException(e); + } finally { + endFunction("get_next_write_id", exception == null, exception, tblName); + } + return new GetNextWriteIdResult(writeId); + } + + @Override + public FinalizeWriteIdResult finalize_write_id(FinalizeWriteIdRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + long writeId = req.getWriteId(); + boolean commit = req.isCommit(); + startFunction("finalize_write_id", " : db=" + dbName + " tbl=" + tblName + + " writeId=" + writeId + " commit=" + commit); + Exception ex = null; + try { + boolean ok = false; + ms.openTransaction(); + try { + MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId); + tw.setState(String.valueOf(commit ? 
MM_WRITE_COMMITTED : MM_WRITE_ABORTED)); + ms.updateTableWrite(tw); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("finalize_write_id", ex == null, ex, tblName); + } + return new FinalizeWriteIdResult(); + } + + private void commitOrRollback(RawStore ms, boolean ok) throws MetaException { + if (ok) { + if (!ms.commitTransaction()) throw new MetaException("Failed to commit"); + } else { + ms.rollbackTransaction(); + } + } + + @Override + public HeartbeatWriteIdResult heartbeat_write_id(HeartbeatWriteIdRequest req) + throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + long writeId = req.getWriteId(); + startFunction("heartbeat_write_id", " : db=" + + dbName + " tbl=" + tblName + " writeId=" + writeId); + Exception ex = null; + boolean wasAborted = false; + try { + boolean ok = false; + ms.openTransaction(); + try { + MTableWrite tw = getActiveTableWrite(ms, dbName, tblName, writeId); + long absTimeout = HiveConf.getTimeVar(getConf(), + ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS); + if (tw.getCreated() + absTimeout < System.currentTimeMillis()) { + tw.setState(String.valueOf(MM_WRITE_ABORTED)); + wasAborted = true; + } + tw.setLastHeartbeat(System.currentTimeMillis()); + ms.updateTableWrite(tw); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("heartbeat_write_id", ex == null, ex, tblName); + } + if (wasAborted) throw new MetaException("The write was aborted due to absolute timeout"); + return new HeartbeatWriteIdResult(); + } + + private MTableWrite getActiveTableWrite(RawStore ms, String dbName, + String tblName, long writeId) throws MetaException { + MTableWrite tw = ms.getTableWrite(dbName, tblName, writeId); + if (tw == null) { + return null; + } + assert tw.getState().length() == 1; + 
char state = tw.getState().charAt(0); + if (state != MM_WRITE_OPEN) { + throw new MetaException("Invalid write state: " + state); + } + return tw; + } + + @Override + public GetValidWriteIdsResult get_valid_write_ids( + GetValidWriteIdsRequest req) throws TException { + RawStore ms = getMS(); + String dbName = req.getDbName(), tblName = req.getTblName(); + startFunction("get_valid_write_ids", " : db=" + dbName + " tbl=" + tblName); + GetValidWriteIdsResult result = new GetValidWriteIdsResult(); + Exception ex = null; + try { + boolean ok = false; + ms.openTransaction(); + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new InvalidObjectException(dbName + "." + tblName); + } + long nextId = tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0; + long watermarkId = tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1; + if (nextId > (watermarkId + 1)) { + // There may be some intermediate failed or active writes; get the valid ones. + List ids = ms.getTableWriteIds( + dbName, tblName, watermarkId, nextId, MM_WRITE_COMMITTED); + // TODO: we could optimize here and send the smaller of the lists, and also use ranges + if (!ids.isEmpty()) { + Iterator iter = ids.iterator(); + long oldWatermarkId = watermarkId; + while (iter.hasNext()) { + if (iter.next() != watermarkId + 1) break; + ++watermarkId; + } + long removed = watermarkId - oldWatermarkId; + if (removed > 0) { + ids = ids.subList((int)removed, ids.size()); + } + if (!ids.isEmpty()) { + result.setIds(ids); + result.setAreIdsValid(true); + } + } + } + result.setHighWatermarkId(nextId); + result.setLowWatermarkId(watermarkId); + ok = true; + } finally { + commitOrRollback(ms, ok); + } + } catch (Exception e) { + ex = e; + throwMetaException(e); + } finally { + endFunction("get_valid_write_ids", ex == null, ex, tblName); + } + return result; + } } @@ -6951,6 +7082,7 @@ public void run() { startCompactorInitiator(conf); startCompactorWorkers(conf); 
startCompactorCleaner(conf); + startMmHousekeepingThread(conf); startHouseKeeperService(conf); } catch (Throwable e) { LOG.error("Failure when starting the compactor, compactions may not happen, " + @@ -6990,8 +7122,18 @@ private static void startCompactorCleaner(HiveConf conf) throws Exception { } } + private static void startMmHousekeepingThread(HiveConf conf) throws Exception { + long intervalMs = HiveConf.getTimeVar(conf, + ConfVars.HIVE_METASTORE_MM_THREAD_SCAN_INTERVAL, TimeUnit.MILLISECONDS); + if (intervalMs > 0) { + MetaStoreThread thread = new MmCleanerThread(intervalMs); + initializeAndStartThread(thread, conf); + } + } + + private static MetaStoreThread instantiateThread(String classname) throws Exception { - Class c = Class.forName(classname); + Class c = Class.forName(classname); Object o = c.newInstance(); if (MetaStoreThread.class.isAssignableFrom(o.getClass())) { return (MetaStoreThread)o; @@ -7012,6 +7154,7 @@ private static void initializeAndStartThread(MetaStoreThread thread, HiveConf co thread.init(new AtomicBoolean(), new AtomicBoolean()); thread.start(); } + private static void startHouseKeeperService(HiveConf conf) throws Exception { if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { return; @@ -7020,7 +7163,7 @@ private static void startHouseKeeperService(HiveConf conf) throws Exception { startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService")); startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidWriteSetService")); } - private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception { + private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception { //todo: when metastore adds orderly-shutdown logic, houseKeeper.stop() //should be called form it HouseKeeperService houseKeeper = (HouseKeeperService)c.newInstance(); diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index c32486fdd1c8..73e14c49ad7b 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -27,105 +27,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveConfUtil; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; -import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; -import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; -import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; -import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.CheckLockRequest; -import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; -import org.apache.hadoop.hive.metastore.api.CompactionRequest; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; -import 
org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DropConstraintRequest; -import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr; -import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventResponse; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; -import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; -import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse; -import org.apache.hadoop.hive.metastore.api.GrantRevokeType; -import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; -import 
org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.Index; -import org.apache.hadoop.hive.metastore.api.InvalidInputException; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; -import org.apache.hadoop.hive.metastore.api.NoSuchLockException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.NotificationEvent; -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; -import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; -import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.PartitionEventType; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult; -import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; -import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; -import org.apache.hadoop.hive.metastore.api.Role; -import 
org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableMeta; -import org.apache.hadoop.hive.metastore.api.TableStatsRequest; -import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnOpenException; -import org.apache.hadoop.hive.metastore.api.Type; -import org.apache.hadoop.hive.metastore.api.UnknownDBException; -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.shims.ShimLoader; @@ -2496,4 +2399,27 @@ public boolean cacheFileMetadata( CacheFileMetadataResult result = client.cache_file_metadata(req); return result.isIsSupported(); } + + @Override + public long getNextTableWriteId(String dbName, String tableName) throws TException { + return client.get_next_write_id(new GetNextWriteIdRequest(dbName, tableName)).getWriteId(); + } + + @Override + public void finalizeTableWrite( + String dbName, String tableName, long writeId, boolean commit) throws TException { + client.finalize_write_id(new FinalizeWriteIdRequest(dbName, tableName, writeId, commit)); + } + + @Override + public void heartbeatTableWrite( + String dbName, String tableName, long writeId) throws 
TException { + client.heartbeat_write_id(new HeartbeatWriteIdRequest(dbName, tableName, writeId)); + } + + @Override + public GetValidWriteIdsResult getValidWriteIds( + String dbName, String tableName) throws TException { + return client.get_valid_write_ids(new GetValidWriteIdsRequest(dbName, tableName)); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index b77055905a04..3245bb88f98e 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult; import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; @@ -1634,4 +1635,13 @@ void addPrimaryKey(List primaryKeyCols) throws void addForeignKey(List foreignKeyCols) throws MetaException, NoSuchObjectException, TException; + + long getNextTableWriteId(String dbName, String tableName) throws TException; + + void heartbeatTableWrite(String dbName, String tableName, long writeId) throws TException; + + void finalizeTableWrite(String dbName, String tableName, long writeId, + boolean commit) throws TException; + + GetValidWriteIdsResult getValidWriteIds(String dbName, String tableName) throws TException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 561f3e3708a0..125a3e51209a 100644 --- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -113,15 +113,8 @@ class MetaStoreDirectSql { private final boolean isAggregateStatsCacheEnabled; private AggregateStatsCache aggrStatsCache; - public MetaStoreDirectSql(PersistenceManager pm, Configuration conf) { + public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, DatabaseProduct dbType) { this.pm = pm; - DatabaseProduct dbType = null; - try { - dbType = DatabaseProduct.determineDatabaseProduct(getProductName()); - } catch (SQLException e) { - LOG.warn("Cannot determine database product; assuming OTHER", e); - dbType = DatabaseProduct.OTHER; - } this.dbType = dbType; int batchSize = HiveConf.getIntVar(conf, ConfVars.METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE); if (batchSize == DETECT_BATCHING) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java index a0c8d3b0e3c5..d4d94ffe4bd2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java @@ -51,6 +51,7 @@ public interface MetaStoreThread { * thread should then assure that the loop has been gone completely through at * least once. */ + // TODO: move these test parameters to more specific places... 
there's no need to have them here void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException; /** diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java index 41385f7108f4..28fcfa8727b3 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java @@ -1884,4 +1884,20 @@ public static void mergeColStats(ColumnStatistics csNew, ColumnStatistics csOld) } csNew.setStatsObj(list); } + + public static boolean isMmTable(Table table) { + return isMmTable(table.getParameters()); + } + + public static boolean isMmTable(Map params) { + // TODO: perhaps it should be a 3rd value for 'transactional'? + String value = params.get(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } + + public static boolean isMmTable(Properties params) { + // TODO: perhaps it should be a 3rd value for 'transactional'? + String value = params.getProperty(hive_metastoreConstants.TABLE_IS_MM); + return value != null && value.equalsIgnoreCase("true"); + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java new file mode 100644 index 000000000000..d99b0d7c3acd --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MmCleanerThread.java @@ -0,0 +1,397 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ValidWriteIds; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.RawStore.FullTableName; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.model.MTableWrite; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Supplier; + +public class MmCleanerThread extends Thread implements MetaStoreThread { + private final static Logger LOG = LoggerFactory.getLogger(MmCleanerThread.class); + private HiveConf conf; + private int threadId; + private AtomicBoolean stop; + private long intervalMs; + private long heartbeatTimeoutMs, absTimeoutMs, abortedGraceMs; + /** Time override for tests. Only used for MM timestamp logic, not for the thread timing. 
*/ + private Supplier timeOverride = null; + + public MmCleanerThread(long intervalMs) { + this.intervalMs = intervalMs; + } + + @VisibleForTesting + void overrideTime(Supplier timeOverride) { + this.timeOverride = timeOverride; + } + + private long getTimeMs() { + return timeOverride == null ? System.currentTimeMillis() : timeOverride.get(); + } + + @Override + public void setHiveConf(HiveConf conf) { + this.conf = conf; + heartbeatTimeoutMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT, TimeUnit.MILLISECONDS); + absTimeoutMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT, TimeUnit.MILLISECONDS); + abortedGraceMs = HiveConf.getTimeVar( + conf, ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD, TimeUnit.MILLISECONDS); + if (heartbeatTimeoutMs > absTimeoutMs) { + throw new RuntimeException("Heartbeat timeout " + heartbeatTimeoutMs + + " cannot be larger than the absolute timeout " + absTimeoutMs); + } + } + + @Override + public void setThreadId(int threadId) { + this.threadId = threadId; + } + + @Override + public void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException { + this.stop = stop; + setPriority(MIN_PRIORITY); + setDaemon(true); + } + + @Override + public void run() { + // Only get RS here, when we are already on the thread. 
+ RawStore rs = getRs(); + while (true) { + if (checkStop()) return; + long endTimeNs = System.nanoTime() + intervalMs * 1000000L; + + runOneIteration(rs); + + if (checkStop()) return; + long waitTimeMs = (endTimeNs - System.nanoTime()) / 1000000L; + if (waitTimeMs <= 0) continue; + try { + Thread.sleep(waitTimeMs); + } catch (InterruptedException e) { + LOG.error("Thread was interrupted and will now exit"); + return; + } + } + } + + private RawStore getRs() { + try { + return RawStoreProxy.getProxy(conf, conf, + conf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL), threadId); + } catch (MetaException e) { + LOG.error("Failed to get RawStore; the thread will now die", e); + throw new RuntimeException(e); + } + } + + private boolean checkStop() { + if (!stop.get()) return false; + LOG.info("Stopping due to an external request"); + return true; + } + + @VisibleForTesting + void runOneIteration(RawStore rs) { + // We only get the names here; we want to get and process each table in a separate DB txn. + List mmTables = null; + try { + mmTables = rs.getAllMmTablesForCleanup(); + } catch (MetaException e) { + LOG.error("Failed to get tables", e); + return; + } + for (FullTableName tableName : mmTables) { + try { + processOneTable(tableName, rs); + } catch (MetaException e) { + LOG.error("Failed to process " + tableName, e); + } + } + } + + private void processOneTable(FullTableName table, RawStore rs) throws MetaException { + // 1. Time out writes that have been running for a while. + // a) Heartbeat timeouts (not enabled right now as heartbeat is not implemented). + // b) Absolute timeouts. + // c) Gaps that have the next ID and the derived absolute timeout. This is a small special + // case that can happen if we increment next ID but fail to insert the write ID record, + // which we do in separate txns to avoid making the conflict-prone increment txn longer. 
+ LOG.info("Processing table " + table); + Table t = rs.getTable(table.dbName, table.tblName); + HashSet removeWriteIds = new HashSet<>(), cleanupOnlyWriteIds = new HashSet<>(); + getWritesThatReadyForCleanUp(t, table, rs, removeWriteIds, cleanupOnlyWriteIds); + + // 2. Delete the aborted writes' files from the FS. + deleteAbortedWriteIdFiles(table, rs, t, removeWriteIds); + deleteAbortedWriteIdFiles(table, rs, t, cleanupOnlyWriteIds); + // removeWriteIds-s now only contains the writes that were fully cleaned up after. + + // 3. Advance the watermark. + advanceWatermark(table, rs, removeWriteIds); + } + + private void getWritesThatReadyForCleanUp(Table t, FullTableName table, RawStore rs, + HashSet removeWriteIds, HashSet cleanupOnlyWriteIds) throws MetaException { + // We will generally ignore errors here. First, we expect some conflicts; second, we will get + // the final view of things after we do (or try, at any rate) all the updates. + long watermarkId = t.isSetMmWatermarkWriteId() ? t.getMmWatermarkWriteId() : -1, + nextWriteId = t.isSetMmNextWriteId() ? t.getMmNextWriteId() : 0; + long now = getTimeMs(), earliestOkHeartbeatMs = now - heartbeatTimeoutMs, + earliestOkCreateMs = now - absTimeoutMs, latestAbortedMs = now - abortedGraceMs; + + List writes = rs.getTableWrites( + table.dbName, table.tblName, watermarkId, nextWriteId); + ListIterator iter = writes.listIterator(writes.size()); + long expectedId = -1, nextCreated = -1; + // We will go in reverse order and add aborted writes for the gaps that have a following + // write ID that would imply that the previous one (created earlier) would have already + // expired, had it been open and not updated. 
+ while (iter.hasPrevious()) { + MTableWrite write = iter.previous(); + addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, write.getWriteId(), + nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now); + expectedId = write.getWriteId() - 1; + nextCreated = write.getCreated(); + char state = write.getState().charAt(0); + if (state == HiveMetaStore.MM_WRITE_ABORTED) { + if (write.getLastHeartbeat() < latestAbortedMs) { + removeWriteIds.add(write.getWriteId()); + } else { + cleanupOnlyWriteIds.add(write.getWriteId()); + } + } else if (state == HiveMetaStore.MM_WRITE_OPEN && write.getCreated() < earliestOkCreateMs) { + // TODO: also check for heartbeat here. + if (expireTimedOutWriteId(rs, table.dbName, table.tblName, write.getWriteId(), + now, earliestOkCreateMs, earliestOkHeartbeatMs, cleanupOnlyWriteIds)) { + cleanupOnlyWriteIds.add(write.getWriteId()); + } + } + } + addTimedOutMissingWriteIds(rs, table.dbName, table.tblName, watermarkId, + nextCreated, expectedId, earliestOkHeartbeatMs, cleanupOnlyWriteIds, now); + } + + private void advanceWatermark( + FullTableName table, RawStore rs, HashSet cleanedUpWriteIds) { + if (!rs.openTransaction()) { + LOG.error("Cannot open transaction"); + return; + } + boolean success = false; + try { + Table t = rs.getTable(table.dbName, table.tblName); + if (t == null) { + return; + } + long watermarkId = t.getMmWatermarkWriteId(); + List writeIds = rs.getTableWriteIds(table.dbName, table.tblName, watermarkId, + t.getMmNextWriteId(), HiveMetaStore.MM_WRITE_COMMITTED); + long expectedId = watermarkId + 1; + boolean hasGap = false; + Iterator idIter = writeIds.iterator(); + while (idIter.hasNext()) { + long next = idIter.next(); + if (next < expectedId) continue; + while (next > expectedId) { + if (!cleanedUpWriteIds.contains(expectedId)) { + hasGap = true; + break; + } + ++expectedId; + } + if (hasGap) break; + ++expectedId; + } + // Make sure we also advance over the trailing aborted ones. 
+ if (!hasGap) { + while (cleanedUpWriteIds.contains(expectedId)) { + ++expectedId; + } + } + long newWatermarkId = expectedId - 1; + if (newWatermarkId > watermarkId) { + t.setMmWatermarkWriteId(newWatermarkId); + rs.alterTable(table.dbName, table.tblName, t); + rs.deleteTableWrites(table.dbName, table.tblName, -1, expectedId); + } + success = true; + } catch (Exception ex) { + // TODO: should we try a couple times on conflicts? Aborted writes cannot be unaborted. + LOG.error("Failed to advance watermark", ex); + rs.rollbackTransaction(); + } + if (success) { + tryCommit(rs); + } + } + + private void deleteAbortedWriteIdFiles( + FullTableName table, RawStore rs, Table t, HashSet cleanUpWriteIds) { + if (cleanUpWriteIds.isEmpty()) return; + if (t.getPartitionKeysSize() > 0) { + for (String location : rs.getAllPartitionLocations(table.dbName, table.tblName)) { + deleteAbortedWriteIdFiles(location, cleanUpWriteIds); + } + } else { + deleteAbortedWriteIdFiles(t.getSd().getLocation(), cleanUpWriteIds); + } + } + + private void deleteAbortedWriteIdFiles(String location, HashSet abortedWriteIds) { + LOG.info("Looking for " + abortedWriteIds.size() + " aborted write output in " + location); + Path path = new Path(location); + FileSystem fs; + FileStatus[] files; + try { + fs = path.getFileSystem(conf); + if (!fs.exists(path)) { + LOG.warn(path + " does not exist; assuming that the cleanup is not needed."); + return; + } + // TODO# this doesn't account for list bucketing. Do nothing now, ACID will solve all problems. 
+ files = fs.listStatus(path); + } catch (Exception ex) { + LOG.error("Failed to get files for " + path + "; cannot ensure cleanup for any writes"); + abortedWriteIds.clear(); + return; + } + for (FileStatus file : files) { + Path childPath = file.getPath(); + if (!file.isDirectory()) { + LOG.warn("Skipping a non-directory file " + childPath); + continue; + } + Long writeId = ValidWriteIds.extractWriteId(childPath); + if (writeId == null) { + LOG.warn("Skipping an unknown directory " + childPath); + continue; + } + if (!abortedWriteIds.contains(writeId.longValue())) continue; + try { + if (!fs.delete(childPath, true)) throw new IOException("delete returned false"); + } catch (Exception ex) { + LOG.error("Couldn't delete " + childPath + "; not cleaning up " + writeId, ex); + abortedWriteIds.remove(writeId.longValue()); + } + } + } + + private boolean expireTimedOutWriteId(RawStore rs, String dbName, + String tblName, long writeId, long now, long earliestOkCreatedMs, + long earliestOkHeartbeatMs, HashSet cleanupOnlyWriteIds) { + if (!rs.openTransaction()) { + return false; + } + try { + MTableWrite tw = rs.getTableWrite(dbName, tblName, writeId); + if (tw == null) { + // The write has been updated since the time when we thought it had expired. + tryCommit(rs); + return true; + } + char state = tw.getState().charAt(0); + if (state != HiveMetaStore.MM_WRITE_OPEN + || (tw.getCreated() > earliestOkCreatedMs + && tw.getLastHeartbeat() > earliestOkHeartbeatMs)) { + tryCommit(rs); + return true; // The write has been updated since the time when we thought it had expired.
+ } + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_ABORTED)); + tw.setLastHeartbeat(now); + rs.updateTableWrite(tw); + } catch (Exception ex) { + LOG.error("Failed to update an expired table write", ex); + rs.rollbackTransaction(); + return false; + } + boolean result = tryCommit(rs); + if (result) { + cleanupOnlyWriteIds.add(writeId); + } + return result; + } + + private boolean tryCommit(RawStore rs) { + try { + return rs.commitTransaction(); + } catch (Exception ex) { + LOG.error("Failed to commit transaction", ex); + return false; + } + } + + private boolean addTimedOutMissingWriteIds(RawStore rs, String dbName, String tblName, + long foundPrevId, long nextCreated, long expectedId, long earliestOkHeartbeatMs, + HashSet cleanupOnlyWriteIds, long now) throws MetaException { + // Assume all missing ones are created at the same time as the next present write ID. + // We also assume missing writes never had any heartbeats. + if (nextCreated >= earliestOkHeartbeatMs || expectedId < 0) return true; + Table t = null; + List localCleanupOnlyWriteIds = new ArrayList<>(); + while (foundPrevId < expectedId) { + if (t == null && !rs.openTransaction()) { + LOG.error("Cannot open transaction; skipping"); + return false; + } + try { + if (t == null) { + t = rs.getTable(dbName, tblName); + } + // We don't need to double check if the write exists; the unique index will cause an error. + rs.createTableWrite(t, expectedId, HiveMetaStore.MM_WRITE_ABORTED, now); + } catch (Exception ex) { + // TODO: don't log conflict exceptions?.. although we barely ever expect them. 
+ LOG.error("Failed to create a missing table write", ex); + rs.rollbackTransaction(); + return false; + } + localCleanupOnlyWriteIds.add(expectedId); + --expectedId; + } + boolean result = (t == null || tryCommit(rs)); + if (result) { + cleanupOnlyWriteIds.addAll(localCleanupOnlyWriteIds); + } + return result; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 90ea641c8d93..c679b3566409 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -25,6 +25,8 @@ import java.net.InetAddress; import java.net.URI; import java.nio.ByteBuffer; +import java.sql.Connection; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -54,6 +56,7 @@ import javax.jdo.Query; import javax.jdo.Transaction; import javax.jdo.datastore.DataStoreCache; +import javax.jdo.datastore.JDOConnection; import javax.jdo.identity.IntIdentity; import com.google.common.collect.Maps; @@ -144,6 +147,7 @@ import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; import org.apache.hadoop.hive.metastore.model.MTablePrivilege; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.model.MType; import org.apache.hadoop.hive.metastore.model.MVersionTable; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; @@ -221,6 +225,7 @@ private static enum TXN_STATUS { private boolean isInitialized = false; private PersistenceManager pm = null; private MetaStoreDirectSql directSql = null; + private DatabaseProduct dbType = null; private PartitionExpressionProxy expressionProxy = null; private Configuration hiveConf; private volatile int openTrasactionCalls = 0; @@ -399,15 +404,37 @@ private void 
initializeHelper(Properties dsProps) { pm = getPersistenceManager(); isInitialized = pm != null; if (isInitialized) { + dbType = determineDatabaseProduct(); expressionProxy = createExpressionProxy(hiveConf); if (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL)) { - directSql = new MetaStoreDirectSql(pm, hiveConf); + directSql = new MetaStoreDirectSql(pm, hiveConf, dbType); } } LOG.debug("RawStore: " + this + ", with PersistenceManager: " + pm + " created in the thread with id: " + Thread.currentThread().getId()); } + private DatabaseProduct determineDatabaseProduct() { + try { + return DatabaseProduct.determineDatabaseProduct(getProductName(pm)); + } catch (SQLException e) { + LOG.warn("Cannot determine database product; assuming OTHER", e); + return DatabaseProduct.OTHER; + } + } + + private static String getProductName(PersistenceManager pm) { + JDOConnection jdoConn = pm.getDataStoreConnection(); + try { + return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName(); + } catch (Throwable t) { + LOG.warn("Error retrieving product name", t); + return null; + } finally { + jdoConn.close(); // We must release the connection before we call other pm methods. + } + } + /** * Creates the proxy used to evaluate expressions. This is here to prevent circular * dependency - ql -> metastore client <-> metastore server -> ql. If server and @@ -581,15 +608,52 @@ public boolean openTransaction() { return result; } - /** - * if this is the commit of the first open call then an actual commit is - * called. 
- * - * @return Always returns true - */ @Override @SuppressWarnings("nls") public boolean commitTransaction() { + if (!startCommitTransaction()) return false; + + openTrasactionCalls--; + debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive()); + if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { + transactionStatus = TXN_STATUS.COMMITED; + currentTransaction.commit(); + } + + return true; + } + + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + if (!startCommitTransaction()) return false; + + if (--openTrasactionCalls != 0) { + String msg = "commitTransactionExpectDeadlock cannot be called for a nested transaction"; + LOG.error(msg); + throw new AssertionError(msg); + } + + transactionStatus = TXN_STATUS.COMMITED; + try { + currentTransaction.commit(); + } catch (Exception ex) { + Throwable candidate = ex; + while (candidate != null && !(candidate instanceof SQLException)) { + candidate = candidate.getCause(); + } + if (candidate == null) throw ex; + if (DatabaseProduct.isDeadlock(dbType, (SQLException)candidate)) { + LOG.info("Deadlock exception during commit: " + candidate.getMessage()); + return null; + } + throw ex; + } + + return true; + } + + private boolean startCommitTransaction() { if (TXN_STATUS.ROLLBACK == transactionStatus) { debugLog("Commit transaction: rollback"); return false; @@ -608,14 +672,6 @@ public boolean commitTransaction() { LOG.error("Unbalanced calls to open/commit Transaction", e); throw e; } - openTrasactionCalls--; - debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive()); - - if ((openTrasactionCalls == 0) && currentTransaction.isActive()) { - transactionStatus = TXN_STATUS.COMMITED; - currentTransaction.commit(); - } - return true; } @@ -726,7 +782,7 @@ public Database getDatabase(String name) throws NoSuchObjectException { } public Database getDatabaseInternal(String name) 
throws MetaException, NoSuchObjectException { - return new GetDbHelper(name, null, true, true) { + return new GetDbHelper(name, true, true) { @Override protected Database getSqlResult(GetHelper ctx) throws MetaException { return directSql.getDatabase(dbName); @@ -1124,6 +1180,11 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, pm.deletePersistentAll(tabConstraints); } + List tableWrites = listAllTableWrites(dbName, tableName); + if (tableWrites != null && tableWrites.size() > 0) { + pm.deletePersistentAll(tableWrites); + } + preDropStorageDescriptor(tbl.getSd()); // then remove the table pm.deletePersistentAll(tbl); @@ -1179,7 +1240,26 @@ public boolean dropTable(String dbName, String tableName) throws MetaException, return mConstraints; } -@Override + + private List listAllTableWrites(String dbName, String tableName) { + List result = null; + Query query = null; + boolean success = false; + openTransaction(); + try { + String queryStr = "table.tableName == t1 && table.database.name == t2"; + query = pm.newQuery(MTableWrite.class, queryStr); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + result = new ArrayList<>((List) query.executeWithArray(tableName, dbName)); + pm.retrieveAll(result); + success = true; + } finally { + closeTransaction(success, query); + } + return result; + } + + @Override public Table getTable(String dbName, String tableName) throws MetaException { boolean commited = false; Table tbl = null; @@ -1489,11 +1569,14 @@ private Table convertToTable(MTable mtbl) throws MetaException { tableType = TableType.MANAGED_TABLE.toString(); } } - return new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl + Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl .getRetention(), convertToStorageDescriptor(mtbl.getSd()), convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), 
mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); + t.setMmNextWriteId(mtbl.getMmNextWriteId()); + t.setMmWatermarkWriteId(mtbl.getMmWatermarkWriteId()); + return t; } private MTable convertToMTable(Table tbl) throws InvalidObjectException, @@ -1531,7 +1614,8 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), tbl.getViewOriginalText(), tbl.getViewExpandedText(), - tableType); + tableType, tbl.isSetMmNextWriteId() ? tbl.getMmNextWriteId() : 0, + tbl.isSetMmWatermarkWriteId() ? tbl.getMmWatermarkWriteId() : -1); } private List convertToMFieldSchemas(List keys) { @@ -2761,7 +2845,8 @@ public GetHelper(String dbName, String tblName, boolean allowSql, boolean allowJ boolean isConfigEnabled = HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL) && (HiveConf.getBoolVar(getConf(), ConfVars.METASTORE_TRY_DIRECT_SQL_DDL) || !isInTxn); if (isConfigEnabled && directSql == null) { - directSql = new MetaStoreDirectSql(pm, getConf()); + dbType = determineDatabaseProduct(); + directSql = new MetaStoreDirectSql(pm, getConf(), dbType); } if (!allowJdo && isConfigEnabled && !directSql.isCompatibleDatastore()) { @@ -2938,15 +3023,13 @@ protected String describeResult() { public abstract class GetDbHelper extends GetHelper { /** * GetHelper for returning db info using directSql/JDO. - * Since this is a db-level call, tblName is ignored, and null is passed irrespective of what is passed in. * @param dbName The Database Name - * @param tblName Placeholder param to match signature, always ignored. * @param allowSql Whether or not we allow DirectSQL to perform this query. * @param allowJdo Whether or not we allow ORM to perform this query. 
* @throws MetaException */ public GetDbHelper( - String dbName, String tblName, boolean allowSql, boolean allowJdo) throws MetaException { + String dbName,boolean allowSql, boolean allowJdo) throws MetaException { super(dbName,null,allowSql,allowJdo); } @@ -3297,6 +3380,8 @@ public void alterTable(String dbname, String name, Table newTable) oldt.setLastAccessTime(newt.getLastAccessTime()); oldt.setViewOriginalText(newt.getViewOriginalText()); oldt.setViewExpandedText(newt.getViewExpandedText()); + oldt.setMmNextWriteId(newt.getMmNextWriteId()); + oldt.setMmWatermarkWriteId(newt.getMmWatermarkWriteId()); // commit the changes success = commitTransaction(); @@ -8692,4 +8777,184 @@ public void dropConstraint(String dbName, String tableName, } } + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + boolean success = false; + openTransaction(); + try { + MTable mtbl = getMTable(tbl.getDbName(), tbl.getTableName()); + MTableWrite tw = new MTableWrite(mtbl, writeId, String.valueOf(state), heartbeat, heartbeat); + pm.makePersistent(tw); + success = true; + } finally { + if (success) { + commitTransaction(); + } else { + rollbackTransaction(); + } + } + } + + @Override + public void updateTableWrite(MTableWrite tw) { + boolean success = false; + openTransaction(); + try { + pm.makePersistent(tw); + success = true; + } finally { + if (success) { + commitTransaction(); + } else { + rollbackTransaction(); + } + } + } + + @Override + public MTableWrite getTableWrite( + String dbName, String tblName, long writeId) throws MetaException { + boolean success = false; + Query query = null; + openTransaction(); + try { + query = pm.newQuery(MTableWrite.class, + "table.tableName == t1 && table.database.name == t2 && writeId == t3"); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3"); + @SuppressWarnings("unchecked") + List writes = (List) query.execute(tblName, dbName, writeId); + 
pm.retrieveAll(writes); + success = true; + if (writes == null || writes.isEmpty()) return null; + if (writes.size() > 1) { + throw new MetaException( + "More than one TableWrite for " + dbName + "." + tblName + " and " + writeId); + } + return writes.get(0); + } finally { + closeTransaction(success, query); + } + } + + @Override + public List getTableWriteIds(String dbName, String tblName, + long watermarkId, long nextWriteId, char state) throws MetaException { + boolean success = false; + Query query = null; + openTransaction(); + try { + boolean hasState = (state != '\0'); + query = pm.newQuery("select writeId from org.apache.hadoop.hive.metastore.model.MTableWrite" + + " where table.tableName == t1 && table.database.name == t2 && writeId > t3" + + " && writeId < t4" + (hasState ? " && state == t5" : "")); + query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.Long t3, " + + "java.lang.Long t4" + (hasState ? ", java.lang.String t5" : "")); + query.setResult("writeId"); + query.setOrdering("writeId asc"); + @SuppressWarnings("unchecked") + List writes = (List) (hasState + ? query.executeWithArray(tblName, dbName, watermarkId, nextWriteId, String.valueOf(state)) + : query.executeWithArray(tblName, dbName, watermarkId, nextWriteId)); + success = true; + return (writes == null) ? 
new ArrayList() : new ArrayList<>(writes); + } finally { + closeTransaction(success, query); + } + } + + @Override + public List getTableWrites( + String dbName, String tblName, long from, long to) throws MetaException { + boolean success = false; + Query query = null; + openTransaction(); + try { + query = pm.newQuery(MTableWrite.class, + "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4"); + query.setOrdering("writeId asc"); + @SuppressWarnings("unchecked") + List writes = + (List) query.executeWithArray(tblName, dbName, from, to); + success = true; + return (writes == null || writes.isEmpty()) ? null : new ArrayList<>(writes); + } finally { + closeTransaction(success, query); + } + } + + + @Override + public void deleteTableWrites( + String dbName, String tblName, long from, long to) throws MetaException { + boolean success = false; + Query query = null; + openTransaction(); + try { + query = pm.newQuery(MTableWrite.class, + "table.tableName == t1 && table.database.name == t2 && writeId > t3 && writeId < t4"); + query.declareParameters( + "java.lang.String t1, java.lang.String t2, java.lang.Long t3, java.lang.Long t4"); + query.deletePersistentAll(tblName, dbName, from, to); + success = true; + } finally { + closeTransaction(success, query); + } + } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + boolean success = false; + Query query = null; + openTransaction(); + try { + // If the table had no MM writes, there's nothing to clean up + query = pm.newQuery(MTable.class, "mmNextWriteId > 0"); + @SuppressWarnings("unchecked") + List tables = (List) query.execute(); + pm.retrieveAll(tables); + ArrayList result = new ArrayList<>(tables.size()); + for (MTable table : tables) { + if (MetaStoreUtils.isMmTable(table.getParameters())) { + result.add(new FullTableName(table.getDatabase().getName(), 
table.getTableName())); + } + } + success = true; + return result; + } finally { + closeTransaction(success, query); + } + } + + @Override + public Collection getAllPartitionLocations(String dbName, String tblName) { + boolean success = false; + Query query = null; + openTransaction(); + try { + String q = "select sd.location from org.apache.hadoop.hive.metastore.model.MPartition" + + " where table.tableName == t1 && table.database.name == t2"; + query = pm.newQuery(); + query.declareParameters("java.lang.String t1, java.lang.String t2"); + @SuppressWarnings("unchecked") + List tables = (List) query.execute(); + pm.retrieveAll(tables); + success = true; + return new ArrayList<>(tables); + } finally { + closeTransaction(success, query); + } + } + + private void closeTransaction(boolean success, Query query) { + if (success) { + commitTransaction(); + } else { + rollbackTransaction(); + } + if (query != null) { + query.closeAll(); + } + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index a3dd4e5196c7..d0e7587b543d 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -23,6 +23,7 @@ import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; import java.nio.ByteBuffer; +import java.util.Collection; import java.util.List; import java.util.Map; @@ -60,6 +61,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -93,6 +95,15 @@ public interface RawStore extends Configurable { @CanNotRetry public abstract boolean 
commitTransaction(); + /** + * Commits transaction and detects if the failure to do so is a deadlock or not. + * Must be called on the top level with regard to openTransaction calls; attempting to + * call this after several nested openTransaction calls will throw. + * @return true or false - same as commitTransaction; null in case of deadlock. + */ + @CanNotRetry + public abstract Boolean commitTransactionExpectDeadlock(); + /** * Rolls back the current transaction if it is active */ @@ -684,4 +695,35 @@ void createTableWithConstraints(Table tbl, List primaryKeys, void addPrimaryKeys(List pks) throws InvalidObjectException, MetaException; void addForeignKeys(List fks) throws InvalidObjectException, MetaException; + + void updateTableWrite(MTableWrite tw); + + MTableWrite getTableWrite(String dbName, String tblName, long writeId) throws MetaException; + + void createTableWrite(Table tbl, long writeId, char state, long heartbeat); + + List getTableWriteIds(String dbName, String tblName, long watermarkId, long nextWriteId, char state) throws MetaException; + + + public static final class FullTableName { + public final String dbName, tblName; + + public FullTableName(String dbName, String tblName) { + this.dbName = dbName; + this.tblName = tblName; + } + + @Override + public String toString() { + return dbName + "." 
+ tblName; + } + } + + List getAllMmTablesForCleanup() throws MetaException; + + public List getTableWrites(String dbName, String tblName, long from, long to) throws MetaException; + + Collection getAllPartitionLocations(String dbName, String tblName); + + void deleteTableWrites(String dbName, String tblName, long from, long to) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java index 0f08f434e706..f9424791f4e0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java @@ -40,6 +40,7 @@ public final class TransactionalValidationListener extends MetaStorePreEventList // These constants are also imported by org.apache.hadoop.hive.ql.io.AcidUtils. public static final String DEFAULT_TRANSACTIONAL_PROPERTY = "default"; public static final String LEGACY_TRANSACTIONAL_PROPERTY = "legacy"; + public static final String INSERTONLY_TRANSACTIONAL_PROPERTY = "insert_only"; TransactionalValidationListener(Configuration conf) { super(conf); @@ -105,8 +106,11 @@ private void handleAlterTableTransactionalProp(PreAlterTableEvent context) throw } if ("true".equalsIgnoreCase(transactionalValue)) { if (!conformToAcid(newTable)) { - throw new MetaException("The table must be bucketed and stored using an ACID compliant" + - " format (such as ORC)"); + // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing + if (transactionalPropertiesValue == null || !"insert_only".equalsIgnoreCase(transactionalPropertiesValue)) { + throw new MetaException("The table must be bucketed and stored using an ACID compliant" + + " format (such as ORC)"); + } } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { @@ -172,32 +176,40 @@ private void 
handleCreateTableTransactionalProp(PreCreateTableEvent context) thr if (parameters == null || parameters.isEmpty()) { return; } - String transactionalValue = null; - boolean transactionalPropFound = false; + String transactional = null; + String transactionalProperties = null; Set keys = new HashSet<>(parameters.keySet()); for(String key : keys) { - if(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.equalsIgnoreCase(key)) { - transactionalPropFound = true; - transactionalValue = parameters.get(key); + // Get the "transactional" tblproperties value + if (hive_metastoreConstants.TABLE_IS_TRANSACTIONAL.equalsIgnoreCase(key)) { + transactional = parameters.get(key); parameters.remove(key); } + + // Get the "transactional_properties" tblproperties value + if (hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES.equalsIgnoreCase(key)) { + transactionalProperties = parameters.get(key); + } } - if (!transactionalPropFound) { + if (transactional == null) { return; } - if ("false".equalsIgnoreCase(transactionalValue)) { + if ("false".equalsIgnoreCase(transactional)) { // just drop transactional=false. 
For backward compatibility in case someone has scripts // with transactional=false LOG.info("'transactional'='false' is no longer a valid property and will be ignored"); return; } - if ("true".equalsIgnoreCase(transactionalValue)) { + if ("true".equalsIgnoreCase(transactional)) { if (!conformToAcid(newTable)) { - throw new MetaException("The table must be bucketed and stored using an ACID compliant" + - " format (such as ORC)"); + // INSERT_ONLY tables don't have to conform to ACID requirement like ORC or bucketing + if (transactionalProperties == null || !"insert_only".equalsIgnoreCase(transactionalProperties)) { + throw new MetaException("The table must be bucketed and stored using an ACID compliant" + + " format (such as ORC)"); + } } if (newTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) { @@ -211,7 +223,7 @@ private void handleCreateTableTransactionalProp(PreCreateTableEvent context) thr return; } - // transactional prop is found, but the value is not in expected range + // transactional is found, but the value is not in expected range throw new MetaException("'transactional' property of TBLPROPERTIES may only have value 'true'"); } @@ -277,6 +289,7 @@ private String validateTransactionalProperties(String transactionalProperties) { switch (transactionalProperties) { case DEFAULT_TRANSACTIONAL_PROPERTY: case LEGACY_TRANSACTIONAL_PROPERTY: + case INSERTONLY_TRANSACTIONAL_PROPERTY: isValid = true; break; default: diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index f9fad4cb6456..e5b469ba34c5 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.RawStore; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; +import 
org.apache.hadoop.hive.metastore.RawStore.CanNotRetry; import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -73,6 +74,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownTableException; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.PlanResult; import org.apache.hadoop.hive.metastore.hbase.HBaseFilterPlanUtil.ScanPlan; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; @@ -131,12 +133,26 @@ public boolean openTransaction() { @Override public boolean commitTransaction() { if (--txnNestLevel == 0) { - LOG.debug("Committing HBase transaction"); - getHBase().commit(); + commitInternal(); } return true; } + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + if (--txnNestLevel != 0) { + throw new AssertionError("Cannot be called on a nested transaction"); + } + commitInternal(); + return true; + } + + private void commitInternal() { + LOG.debug("Committing HBase transaction"); + getHBase().commit(); + } + @Override public void rollbackTransaction() { txnNestLevel = 0; @@ -2728,4 +2744,57 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO: WTF? 
} + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public void updateTableWrite(MTableWrite tw) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + + @Override + public List getTableWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public List getTableWrites(String dbName, String tblName, + long from, long to) throws MetaException { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public Collection getAllPartitionLocations(String dbName, + String tblName) { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } + + @Override + public void deleteTableWrites(String dbName, String tblName, long from, + long to) throws MetaException { + // TODO: Auto-generated method stub + throw new UnsupportedOperationException(); + } } diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java index 2a78ce9c0cd5..51c62e301e29 100644 --- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java @@ -35,6 +35,8 @@ public class MTable { private String viewOriginalText; private String viewExpandedText; private String tableType; + private 
long mmNextWriteId; + private long mmWatermarkWriteId; public MTable() {} @@ -55,7 +57,8 @@ public MTable() {} public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, int createTime, int lastAccessTime, int retention, List partitionKeys, Map parameters, - String viewOriginalText, String viewExpandedText, String tableType) { + String viewOriginalText, String viewExpandedText, String tableType, long mmNextWriteId, + long mmWatermarkWriteId) { this.tableName = tableName; this.database = database; this.sd = sd; @@ -68,6 +71,8 @@ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, Strin this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; this.tableType = tableType; + this.mmWatermarkWriteId = mmWatermarkWriteId; + this.mmNextWriteId = mmNextWriteId; } /** @@ -237,4 +242,20 @@ public void setTableType(String tableType) { public String getTableType() { return tableType; } + + public long getMmNextWriteId() { + return mmNextWriteId; + } + + public long getMmWatermarkWriteId() { + return mmWatermarkWriteId; + } + + public void setMmNextWriteId(long mmNextWriteId) { + this.mmNextWriteId = mmNextWriteId; + } + + public void setMmWatermarkWriteId(long mmWatermarkWriteId) { + this.mmWatermarkWriteId = mmWatermarkWriteId; + } } diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java new file mode 100644 index 000000000000..b7f398a1841f --- /dev/null +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTableWrite.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.model; + +public class MTableWrite { + private MTable table; + private long writeId; + private String state; + private long lastHeartbeat; + private long created; + + public MTableWrite() {} + + public MTableWrite(MTable table, long writeId, String state, long lastHeartbeat, long created) { + this.table = table; + this.writeId = writeId; + this.state = state; + this.lastHeartbeat = lastHeartbeat; + this.created = created; + } + + public MTable getTable() { + return table; + } + + public long getWriteId() { + return writeId; + } + + public String getState() { + return state; + } + + public long getLastHeartbeat() { + return lastHeartbeat; + } + + public long getCreated() { + return created; + } + + public void setTable(MTable table) { + this.table = table; + } + + public void setWriteId(long writeId) { + this.writeId = writeId; + } + + public void setState(String state) { + this.state = state; + } + + public void setLastHeartbeat(long lastHeartbeat) { + this.lastHeartbeat = lastHeartbeat; + } + + public void setCreated(long created) { + this.created = created; + } +} diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo index bfd6dddcec33..ce101dd002ec 100644 --- a/metastore/src/model/package.jdo +++ b/metastore/src/model/package.jdo @@ -53,7 +53,7 @@ - + @@ -182,6 +182,12 @@ + + + + + + @@ -204,7 +210,7 @@ - + @@ 
-213,7 +219,7 @@ - + @@ -282,7 +288,7 @@ - + @@ -302,7 +308,7 @@ - + @@ -997,7 +1003,7 @@ - + @@ -1058,6 +1064,32 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index f64b08d8fead..64da9b427660 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -58,6 +59,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -869,4 +871,52 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub } + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + } + + @Override + public void updateTableWrite(MTableWrite tw) { + + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long writeId) { + return null; + } + + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { + return null; + } + + @Override + public List getTableWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + return null; + } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + return null; + } + + @Override + public List getTableWrites(String dbName, String 
tblName, + long from, long to) throws MetaException { + return null; + } + + @Override + public Collection getAllPartitionLocations(String dbName, + String tblName) { + return null; + } + + @Override + public void deleteTableWrites(String dbName, String tblName, long from, + long to) throws MetaException { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 26828865bc48..d6460cd662bb 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.nio.ByteBuffer; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -59,6 +60,7 @@ import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; import org.apache.thrift.TException; @@ -104,14 +106,17 @@ public boolean openTransaction() { @Override public boolean commitTransaction() { + return false; + } + @Override + @CanNotRetry + public Boolean commitTransactionExpectDeadlock() { return false; } @Override public void rollbackTransaction() { - - } @Override @@ -885,6 +890,47 @@ public void addForeignKeys(List fks) throws InvalidObjectException, MetaException { // TODO Auto-generated method stub } + + @Override + public void createTableWrite(Table tbl, long writeId, char state, long heartbeat) { + } + + @Override + public void updateTableWrite(MTableWrite tw) { + } + + @Override + public MTableWrite getTableWrite(String dbName, String tblName, long 
writeId) { + return null; + } + + @Override + public List getTableWriteIds( + String dbName, String tblName, long watermarkId, long nextWriteId, char state) { + return null; + } + + @Override + public List getAllMmTablesForCleanup() throws MetaException { + return null; + } + + @Override + public List getTableWrites(String dbName, String tblName, + long from, long to) throws MetaException { + return null; + } + + @Override + public Collection getAllPartitionLocations(String dbName, + String tblName) { + return null; + } + + @Override + public void deleteTableWrites(String dbName, String tblName, long from, + long to) throws MetaException { + } } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index 04971591a680..a8d34955120f 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -17,16 +17,20 @@ */ package org.apache.hadoop.hive.metastore; +import static org.junit.Assert.*; + import java.util.Arrays; import java.util.HashMap; import java.util.List; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; import org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics; import org.apache.hadoop.hive.common.metrics.metrics2.MetricsReporting; import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; @@ -42,9 +46,13 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import 
org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.model.MTableWrite; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockFile; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -53,6 +61,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.base.Supplier; + public class TestObjectStore { private ObjectStore objectStore = null; @@ -67,6 +77,15 @@ public class TestObjectStore { private static final String ROLE2 = "testobjectstorerole2"; private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName()); + private static final class LongSupplier implements Supplier { + public long value = 0; + + @Override + public Long get() { + return value; + } + } + public static class MockPartitionExpressionProxy implements PartitionExpressionProxy { @Override public String convertExprToFilter(byte[] expr) throws MetaException { @@ -142,7 +161,7 @@ public void testDatabaseOps() throws MetaException, InvalidObjectException, NoSu public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + StorageDescriptor sd = createFakeSd("location"); HashMap params = new HashMap(); params.put("EXTERNAL", "false"); Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); @@ -164,6 +183,156 @@ 
public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO objectStore.dropDatabase(DB1); } + + + /** + * Test table operations + */ + @Test + public void testMmCleaner() throws Exception { + HiveConf conf = new HiveConf(); + conf.set(ConfVars.HIVE_METASTORE_MM_HEARTBEAT_TIMEOUT.varname, "3ms"); + conf.set(ConfVars.HIVE_METASTORE_MM_ABSOLUTE_TIMEOUT.varname, "20ms"); + conf.set(ConfVars.HIVE_METASTORE_MM_ABORTED_GRACE_PERIOD.varname, "5ms"); + conf.set("fs.mock.impl", MockFileSystem.class.getName()); + + MockFileSystem mfs = (MockFileSystem)(new Path("mock:///").getFileSystem(conf)); + mfs.clear(); + mfs.allowDelete = true; + // Don't add the files just yet... + MockFile[] files = new MockFile[9]; + for (int i = 0; i < files.length; ++i) { + files[i] = new MockFile("mock:/foo/mm_" + i + "/1", 0, new byte[0]); + } + + LongSupplier time = new LongSupplier(); + + MmCleanerThread mct = new MmCleanerThread(0); + mct.setHiveConf(conf); + mct.overrideTime(time); + + Database db1 = new Database(DB1, "description", "locationurl", null); + objectStore.createDatabase(db1); + StorageDescriptor sd = createFakeSd("mock:/foo"); + HashMap params = new HashMap(); + params.put("EXTERNAL", "false"); + params.put(hive_metastoreConstants.TABLE_IS_MM, "true"); + Table tbl = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, + null, params, null, null, "MANAGED_TABLE"); + objectStore.createTable(tbl); + + // Add write #0 so the watermark wouldn't advance; skip write #1, add #2 at 0, skip #3 + createCompleteTableWrite(mfs, files, 0, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + mfs.addFile(files[1]); + createCompleteTableWrite(mfs, files, 2, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + mfs.addFile(files[3]); + tbl.setMmNextWriteId(4); + objectStore.alterTable(DB1, TABLE1, tbl); + + mct.runOneIteration(objectStore); + List writes = getAbortedWrites(); + assertEquals(0, writes.size()); // Missing write is not aborted before timeout. + time.value = 4; // Advance time. 
+ mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); // Missing write is aborted after timeout. + assertEquals(1L, writes.get(0).longValue()); + checkDeletedSet(files, 1); + // However, write #3 was not aborted as we cannot determine when it will time out. + createCompleteTableWrite(mfs, files, 4, time, tbl, HiveMetaStore.MM_WRITE_OPEN); + time.value = 8; + // It will now be aborted, since we have a following write. + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(2, writes.size()); + assertTrue(writes.contains(Long.valueOf(3))); + checkDeletedSet(files, 1, 3); + + // Commit #0 and #2 and confirm that the watermark advances. + // It will only advance over #1, since #3 was aborted at 8 and grace period has not passed. + time.value = 10; + MTableWrite tw = objectStore.getTableWrite(DB1, TABLE1, 0); + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED)); + objectStore.updateTableWrite(tw); + tw = objectStore.getTableWrite(DB1, TABLE1, 2); + tw.setState(String.valueOf(HiveMetaStore.MM_WRITE_COMMITTED)); + objectStore.updateTableWrite(tw); + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); + assertEquals(3L, writes.get(0).longValue()); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(2L, tbl.getMmWatermarkWriteId()); + + // Now advance the time and see that watermark also advances over #3. + time.value = 16; + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(0, writes.size()); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(3L, tbl.getMmWatermarkWriteId()); + + // Check that the open write gets aborted after some time; then the watermark advances. 
+ time.value = 25; + mct.runOneIteration(objectStore); + writes = getAbortedWrites(); + assertEquals(1, writes.size()); + assertEquals(4L, writes.get(0).longValue()); + time.value = 31; + mct.runOneIteration(objectStore); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(4L, tbl.getMmWatermarkWriteId()); + checkDeletedSet(files, 1, 3, 4); // The other two should still be deleted. + + // Finally check that we cannot advance watermark if cleanup fails for some file. + createCompleteTableWrite(mfs, files, 5, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + createCompleteTableWrite(mfs, files, 6, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + createCompleteTableWrite(mfs, files, 7, time, tbl, HiveMetaStore.MM_WRITE_COMMITTED); + createCompleteTableWrite(mfs, files, 8, time, tbl, HiveMetaStore.MM_WRITE_ABORTED); + time.value = 37; // Skip the grace period. + files[6].cannotDelete = true; + mct.runOneIteration(objectStore); + checkDeletedSet(files, 1, 3, 4, 5, 8); // The other two should still be deleted. + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(5L, tbl.getMmWatermarkWriteId()); // Watermark only goes up to 5. + files[6].cannotDelete = false; + mct.runOneIteration(objectStore); + checkDeletedSet(files, 1, 3, 4, 5, 6, 8); + tbl = objectStore.getTable(DB1, TABLE1); + assertEquals(8L, tbl.getMmWatermarkWriteId()); // Now it advances all the way. + + objectStore.dropTable(DB1, TABLE1); + objectStore.dropDatabase(DB1); + } + + private void createCompleteTableWrite(MockFileSystem mfs, MockFile[] files, + int id, LongSupplier time, Table tbl, char state) throws MetaException, InvalidObjectException { + objectStore.createTableWrite(tbl, id, state, time.value); + mfs.addFile(files[id]); + tbl.setMmNextWriteId(id + 1); + objectStore.alterTable(DB1, TABLE1, tbl); + } + + private void checkDeletedSet(MockFile[] files, int... 
deleted) { + for (int id : deleted) { + assertTrue("File " + id + " not deleted", files[id].isDeleted); + } + int count = 0; + for (MockFile file : files) { + if (file.isDeleted) ++count; + } + assertEquals(deleted.length, count); // Make sure nothing else is deleted. + } + + private List getAbortedWrites() throws MetaException { + return objectStore.getTableWriteIds(DB1, TABLE1, -1, 10, HiveMetaStore.MM_WRITE_ABORTED); + } + + private StorageDescriptor createFakeSd(String location) { + return new StorageDescriptor(null, location, null, null, false, 0, + new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + } + /** * Tests partition operations @@ -172,7 +341,7 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO public void testPartitionOps() throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException { Database db1 = new Database(DB1, "description", "locationurl", null); objectStore.createDatabase(db1); - StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); + StorageDescriptor sd = createFakeSd("location"); HashMap tableParams = new HashMap(); tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, ""); @@ -265,7 +434,7 @@ public void testDirectSqlErrorMetrics() throws Exception { MetricsFactory.init(conf); CodahaleMetrics metrics = (CodahaleMetrics) MetricsFactory.getInstance(); - objectStore.new GetDbHelper("foo", null, true, true) { + objectStore.new GetDbHelper("foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { return null; @@ -282,7 +451,7 @@ protected Database getJdoResult(ObjectStore.GetHelper ctx) throws Meta MetricsTestUtils.verifyMetricsJson(json, MetricsTestUtils.COUNTER, MetricsConstant.DIRECTSQL_ERRORS, ""); - objectStore.new 
GetDbHelper("foo", null, true, true) { + objectStore.new GetDbHelper("foo", true, true) { @Override protected Database getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { throw new RuntimeException(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index 838d73e4d224..0b10cad55fb5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -233,7 +233,8 @@ private Path getStagingDir(Path inputPath, boolean mkdir) { // Append task specific info to stagingPathName, instead of creating a sub-directory. // This way we don't have to worry about deleting the stagingPathName separately at // end of query execution. - dir = fs.makeQualified(new Path(stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID())); + dir = fs.makeQualified(new Path( + stagingPathName + "_" + this.executionId + "-" + TaskRunner.getTaskRunnerID())); LOG.debug("Created staging dir = " + dir + " for path = " + inputPath); @@ -830,7 +831,7 @@ public void setExplainConfig(ExplainConfiguration explainConfig) { this.explainConfig = explainConfig; } - public void resetOpContext(){ + public void resetOpContext() { opContext = new CompilationOpContext(); sequencer = new AtomicInteger(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index 9e5fd371c53f..690cdff4035e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -39,6 +39,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.common.metrics.common.Metrics; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import 
org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -46,8 +47,11 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.conf.HiveVariableSource; import org.apache.hadoop.hive.conf.VariableSubstitution; +import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.Schema; import org.apache.hadoop.hive.ql.exec.ConditionalTask; import org.apache.hadoop.hive.ql.exec.ExplainTask; @@ -71,6 +75,7 @@ import org.apache.hadoop.hive.ql.hooks.QueryLifeTimeHookContextImpl; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.lockmgr.HiveLock; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; @@ -1505,6 +1510,13 @@ else if(!plan.getAutoCommitValue() && txnManager.getAutoCommit()) { return rollback(createProcessorResponse(ret)); } } + + try { + acquireWriteIds(plan, conf); + } catch (HiveException e) { + return handleHiveException(e, 1); + } + ret = execute(true); if (ret != 0) { //if needRequireLock is false, the release here will do nothing because there is no lock @@ -1565,6 +1577,35 @@ else if(plan.getOperation() == HiveOperation.ROLLBACK) { } } + private static void acquireWriteIds(QueryPlan plan, HiveConf conf) throws HiveException { + // Output IDs are put directly into FileSinkDesc; here, we only need to take care of inputs. 
+ for (ReadEntity input : plan.getInputs()) { + Table t = extractMmTable(input); + if (t == null) continue; + ValidWriteIds ids = Hive.get().getValidWriteIdsForTable(t.getDbName(), t.getTableName()); + ids.addToConf(conf, t.getDbName(), t.getTableName()); + if (plan.getFetchTask() != null) { + ids.addToConf(plan.getFetchTask().getFetchConf(), t.getDbName(), t.getTableName()); + } + } + } + + private static Table extractMmTable(ReadEntity input) { + Table t = null; + switch (input.getType()) { + case TABLE: + t = input.getTable(); + break; + case DUMMYPARTITION: + case PARTITION: + t = input.getPartition().getTable(); + break; + default: return null; + } + return (t != null && !t.isTemporary() + && MetaStoreUtils.isMmTable(t.getParameters())) ? t : null; + } + private CommandProcessorResponse rollback(CommandProcessorResponse cpr) { //console.printError(cpr.toString()); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java index dfad6c192947..dedbb786f49e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/AbstractFileMergeOperator.java @@ -34,6 +34,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.common.collect.Lists; + /** * Fast file merge operator for ORC and RCfile. This is an abstract class which * does not process any rows. 
Refer {@link org.apache.hadoop.hive.ql.exec.OrcFileMergeOperator} @@ -47,20 +49,21 @@ public abstract class AbstractFileMergeOperator protected JobConf jc; protected FileSystem fs; - protected boolean autoDelete; - protected boolean exception; - protected Path outPath; - protected Path finalPath; - protected Path dpPath; - protected Path tmpPath; - protected Path taskTmpPath; - protected int listBucketingDepth; - protected boolean hasDynamicPartitions; - protected boolean isListBucketingAlterTableConcatenate; - protected boolean tmpPathFixedConcatenate; - protected boolean tmpPathFixed; - protected Set incompatFileSet; - protected transient DynamicPartitionCtx dpCtx; + private boolean autoDelete; + private Path outPath; // The output path used by the subclasses. + private Path finalPath; // Used as a final destination; same as outPath for MM tables. + private Path dpPath; + private Path tmpPath; // Only stored to update based on the original in fixTmpPath. + private Path taskTmpPath; // Only stored to update based on the original in fixTmpPath. + private int listBucketingDepth; + private boolean hasDynamicPartitions; + private boolean isListBucketingAlterTableConcatenate; + private boolean tmpPathFixedConcatenate; + private boolean tmpPathFixed; + private Set incompatFileSet; + private transient DynamicPartitionCtx dpCtx; + private boolean isMmTable; + private String taskId; /** Kryo ctor. 
*/ protected AbstractFileMergeOperator() { @@ -77,39 +80,50 @@ public void initializeOp(Configuration hconf) throws HiveException { this.jc = new JobConf(hconf); incompatFileSet = new HashSet(); autoDelete = false; - exception = false; tmpPathFixed = false; tmpPathFixedConcatenate = false; - outPath = null; - finalPath = null; dpPath = null; - tmpPath = null; - taskTmpPath = null; dpCtx = conf.getDpCtx(); hasDynamicPartitions = conf.hasDynamicPartitions(); isListBucketingAlterTableConcatenate = conf .isListBucketingAlterTableConcatenate(); listBucketingDepth = conf.getListBucketingDepth(); Path specPath = conf.getOutputPath(); - updatePaths(Utilities.toTempPath(specPath), - Utilities.toTaskTempPath(specPath)); + isMmTable = conf.getMmWriteId() != null; + if (isMmTable) { + updatePaths(specPath, null); + } else { + updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath)); + } try { fs = specPath.getFileSystem(hconf); - autoDelete = fs.deleteOnExit(outPath); + if (!isMmTable) { + // Do not delete for MM tables. We either want the file if we succeed, or we must + // delete is explicitly before proceeding if the merge fails. + autoDelete = fs.deleteOnExit(outPath); + } } catch (IOException e) { - this.exception = true; - throw new HiveException("Failed to initialize AbstractFileMergeOperator", - e); + throw new HiveException("Failed to initialize AbstractFileMergeOperator", e); } } // sets up temp and task temp path private void updatePaths(Path tp, Path ttp) { - String taskId = Utilities.getTaskId(jc); + if (taskId == null) { + taskId = Utilities.getTaskId(jc); + } tmpPath = tp; - taskTmpPath = ttp; - finalPath = new Path(tp, taskId); - outPath = new Path(ttp, Utilities.toTempPath(taskId)); + if (isMmTable) { + taskTmpPath = null; + // Make sure we don't collide with the source. 
+ outPath = finalPath = new Path(tmpPath, taskId + ".merged"); + } else { + taskTmpPath = ttp; + finalPath = new Path(tp, taskId); + outPath = new Path(ttp, Utilities.toTempPath(taskId)); + } + Utilities.LOG14535.info("Paths for merge " + taskId + ": tmp " + tmpPath + ", task " + + taskTmpPath + ", final " + finalPath + ", out " + outPath, new Exception()); } /** @@ -142,7 +156,7 @@ private void updatePaths(Path tp, Path ttp) { protected void fixTmpPath(Path inputPath, int depthDiff) throws IOException { // don't need to update tmp paths when there is no depth difference in paths - if (depthDiff <=0) { + if (depthDiff <= 0) { return; } @@ -157,10 +171,12 @@ protected void fixTmpPath(Path inputPath, int depthDiff) throws IOException { } Path newTmpPath = new Path(tmpPath, newPath); - Path newTaskTmpPath = new Path(taskTmpPath, newPath); if (!fs.exists(newTmpPath)) { + Utilities.LOG14535.info("Creating " + newTmpPath); fs.mkdirs(newTmpPath); } + + Path newTaskTmpPath = (taskTmpPath != null) ? new Path(taskTmpPath, newPath) : null; updatePaths(newTmpPath, newTaskTmpPath); } @@ -182,7 +198,7 @@ protected void checkPartitionsMatch(Path inputPath) throws IOException { } protected void fixTmpPath(Path path) throws IOException { - + Utilities.LOG14535.info("Calling fixTmpPath with " + path); // Fix temp path for alter table ... concatenate if (isListBucketingAlterTableConcatenate) { if (this.tmpPathFixedConcatenate) { @@ -208,38 +224,49 @@ protected void fixTmpPath(Path path) throws IOException { @Override public void closeOp(boolean abort) throws HiveException { try { - if (!abort) { - // if outPath does not exist, then it means all paths within combine split are skipped as - // they are incompatible for merge (for example: files without stripe stats). 
- // Those files will be added to incompatFileSet - if (fs.exists(outPath)) { - FileStatus fss = fs.getFileStatus(outPath); + if (abort) { + if (!autoDelete || isMmTable) { + fs.delete(outPath, true); + } + return; + } + // if outPath does not exist, then it means all paths within combine split are skipped as + // they are incompatible for merge (for example: files without stripe stats). + // Those files will be added to incompatFileSet + if (fs.exists(outPath)) { + FileStatus fss = fs.getFileStatus(outPath); + if (!isMmTable) { if (!fs.rename(outPath, finalPath)) { - throw new IOException( - "Unable to rename " + outPath + " to " + finalPath); + throw new IOException("Unable to rename " + outPath + " to " + finalPath); } - LOG.info("renamed path " + outPath + " to " + finalPath + " . File" + - " size is " - + fss.getLen()); + LOG.info("Renamed path " + outPath + " to " + finalPath + + "(" + fss.getLen() + " bytes)."); + } else { + assert finalPath.equals(outPath); + // There's always just one file that we have merged. + // The union/DP/etc. should already be account for in the path. + Utilities.writeMmCommitManifest(Lists.newArrayList(outPath), + tmpPath.getParent(), fs, taskId, conf.getMmWriteId(), null); + LOG.info("Merged into " + finalPath + "(" + fss.getLen() + " bytes)."); } + } - // move any incompatible files to final path - if (incompatFileSet != null && !incompatFileSet.isEmpty()) { - for (Path incompatFile : incompatFileSet) { - Path destDir = finalPath.getParent(); - try { - Utilities.renameOrMoveFiles(fs, incompatFile, destDir); - LOG.info("Moved incompatible file " + incompatFile + " to " + - destDir); - } catch (HiveException e) { - LOG.error("Unable to move " + incompatFile + " to " + destDir); - throw new IOException(e); - } - } + // move any incompatible files to final path + if (incompatFileSet != null && !incompatFileSet.isEmpty()) { + if (isMmTable) { + // We only support query-time merge for MM tables, so don't handle this. 
+ throw new HiveException("Incompatible files should not happen in MM tables."); } - } else { - if (!autoDelete) { - fs.delete(outPath, true); + for (Path incompatFile : incompatFileSet) { + Path destDir = finalPath.getParent(); + try { + Utilities.renameOrMoveFiles(fs, incompatFile, destDir); + LOG.info("Moved incompatible file " + incompatFile + " to " + + destDir); + } catch (HiveException e) { + LOG.error("Unable to move " + incompatFile + " to " + destDir); + throw new IOException(e); + } } } } catch (IOException e) { @@ -253,16 +280,26 @@ public void jobCloseOp(Configuration hconf, boolean success) try { Path outputDir = conf.getOutputPath(); FileSystem fs = outputDir.getFileSystem(hconf); - Path backupPath = backupOutputPath(fs, outputDir); - Utilities - .mvFileToFinalPath(outputDir, hconf, success, LOG, conf.getDpCtx(), - null, reporter); - if (success) { - LOG.info("jobCloseOp moved merged files to output dir: " + outputDir); - } - if (backupPath != null) { - fs.delete(backupPath, true); + Long mmWriteId = conf.getMmWriteId(); + if (mmWriteId == null) { + Path backupPath = backupOutputPath(fs, outputDir); + Utilities.mvFileToFinalPath( + outputDir, hconf, success, LOG, conf.getDpCtx(), null, reporter); + if (success) { + LOG.info("jobCloseOp moved merged files to output dir: " + outputDir); + } + if (backupPath != null) { + fs.delete(backupPath, true); + } + } else { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + lbLevels = conf.getListBucketingDepth(); + // We don't expect missing buckets from mere (actually there should be no buckets), + // so just pass null as bucketing context. Union suffix should also be accounted for. 
+ Utilities.handleMmTableFinalPath(outputDir.getParent(), null, hconf, success, + dpLevels, lbLevels, null, mmWriteId, reporter); } + } catch (IOException e) { throw new HiveException("Failed jobCloseOp for AbstractFileMergeOperator", e); @@ -290,4 +327,12 @@ public String getName() { public static String getOperatorName() { return "MERGE"; } + + protected final Path getOutPath() { + return outPath; + } + + protected final void addIncompatibleFile(Path path) { + incompatFileSet.add(path); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java index cbe0aca142da..a8a44bcd67d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java @@ -18,14 +18,20 @@ package org.apache.hadoop.hive.ql.exec; +import java.io.FileNotFoundException; +import java.io.IOException; import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; @@ -37,7 +43,6 @@ * CopyTask implementation. 
**/ public class CopyTask extends Task implements Serializable { - private static final long serialVersionUID = 1L; private static transient final Logger LOG = LoggerFactory.getLogger(CopyTask.class); @@ -60,7 +65,7 @@ public int execute(DriverContext driverContext) { FileSystem srcFs = fromPath.getFileSystem(conf); dstFs = toPath.getFileSystem(conf); - FileStatus[] srcs = LoadSemanticAnalyzer.matchFilesOrDir(srcFs, fromPath); + FileStatus[] srcs = matchFilesOrDir(srcFs, fromPath, work.isSourceMm()); if (srcs == null || srcs.length == 0) { if (work.isErrorOnSrcEmpty()) { console.printError("No files matching path: " + fromPath.toString()); @@ -97,6 +102,45 @@ public int execute(DriverContext driverContext) { } } + // Note: initially copied from LoadSemanticAnalyzer. + private static FileStatus[] matchFilesOrDir( + FileSystem fs, Path path, boolean isSourceMm) throws IOException { + if (!isSourceMm) return matchFilesOneDir(fs, path, null); + // TODO: this doesn't handle list bucketing properly. Does the original exim do that? 
+ FileStatus[] mmDirs = fs.listStatus(path, new ValidWriteIds.AnyIdDirFilter()); + if (mmDirs == null || mmDirs.length == 0) return null; + List allFiles = new ArrayList(); + for (FileStatus mmDir : mmDirs) { + Utilities.LOG14535.info("Found source MM directory " + mmDir.getPath()); + matchFilesOneDir(fs, mmDir.getPath(), allFiles); + } + return allFiles.toArray(new FileStatus[allFiles.size()]); + } + + private static FileStatus[] matchFilesOneDir( + FileSystem fs, Path path, List result) throws IOException { + FileStatus[] srcs = fs.globStatus(path, new EximPathFilter()); + if (srcs != null && srcs.length == 1) { + if (srcs[0].isDirectory()) { + srcs = fs.listStatus(srcs[0].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER); + } + } + if (result != null && srcs != null) { + for (int i = 0; i < srcs.length; ++i) { + result.add(srcs[i]); + } + } + return srcs; + } + + private static final class EximPathFilter implements PathFilter { + @Override + public boolean accept(Path p) { + String name = p.getName(); + return name.equals("_metadata") ? true : !name.startsWith("_") && !name.startsWith("."); + } + } + @Override public StageType getType() { return StageType.COPY; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index cfece771b9a2..7cf83d81d6fb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -4077,7 +4077,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { tbl.getDataLocation()); // create the table - if (crtTbl.getReplaceMode()){ + if (crtTbl.getReplaceMode()) { // replace-mode creates are really alters using CreateTableDesc. 
try { db.alterTable(tbl.getDbName()+"."+tbl.getTableName(),tbl,null); @@ -4091,18 +4091,36 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { } else { db.createTable(tbl, crtTbl.getIfNotExists()); } - if ( crtTbl.isCTAS()) { + Long mmWriteId = crtTbl.getInitialMmWriteId(); + if (crtTbl.isCTAS() || mmWriteId != null) { Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName()); - DataContainer dc = new DataContainer(createdTable.getTTable()); - SessionState.get().getLineageState().setLineage( - createdTable.getPath(), dc, createdTable.getCols() - ); + if (mmWriteId != null) { + // TODO# this would be retrieved via ACID before the query runs; for now we rely on it + // being zero at start; we can't create a write ID before we create the table here. + long initialWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); + if (initialWriteId != mmWriteId) { + throw new HiveException("Initial write ID mismatch - expected " + mmWriteId + + " but got " + initialWriteId); + } + // CTAS create the table on a directory that already exists; import creates the table + // first (in parallel with copies?), then commits after all the loads. + if (crtTbl.isCTAS()) { + db.commitMmTableWrite(tbl, initialWriteId); + } + } + if (crtTbl.isCTAS()) { + DataContainer dc = new DataContainer(createdTable.getTTable()); + SessionState.get().getLineageState().setLineage( + createdTable.getPath(), dc, createdTable.getCols() + ); + } } } addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK)); return 0; } + /** * Create a new table like an existing table. 
* diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java index 9189cfc6f302..e6395724f683 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DependencyCollectionTask.java @@ -20,7 +20,6 @@ import java.io.Serializable; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.api.StageType; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 601ad08331d5..f89372cfd471 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -30,12 +30,15 @@ import org.apache.commons.lang3.StringEscapeUtils; import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveContextAwareRecordReader; @@ -75,6 +78,7 @@ import org.slf4j.LoggerFactory; import com.google.common.collect.Iterators; +import com.google.common.collect.Lists; /** * FetchTask implementation. 
@@ -124,6 +128,7 @@ public class FetchOperator implements Serializable { private transient StructObjectInspector outputOI; private transient Object[] row; + private transient Map writeIdMap; public FetchOperator(FetchWork work, JobConf job) throws HiveException { this(work, job, null, null); @@ -368,6 +373,9 @@ protected FetchInputFormatSplit[] getNextSplits() throws Exception { Class formatter = currDesc.getInputFileFormatClass(); Utilities.copyTableJobPropertiesToConf(currDesc.getTableDesc(), job); InputFormat inputFormat = getInputFormatFromCache(formatter, job); + String inputs = processCurrPathForMmWriteIds(inputFormat); + if (inputs == null) return null; + job.set("mapred.input.dir", inputs); InputSplit[] splits = inputFormat.getSplits(job, 1); FetchInputFormatSplit[] inputSplits = new FetchInputFormatSplit[splits.length]; @@ -384,6 +392,30 @@ protected FetchInputFormatSplit[] getNextSplits() throws Exception { return null; } + private String processCurrPathForMmWriteIds(InputFormat inputFormat) throws IOException { + if (inputFormat instanceof HiveInputFormat) { + return StringUtils.escapeString(currPath.toString()); // No need to process here. + } + if (writeIdMap == null) { + writeIdMap = new HashMap(); + } + // No need to check for MM table - if it is, the IDs should be in the job config. + ValidWriteIds ids = HiveInputFormat.extractWriteIds(writeIdMap, job, currDesc.getTableName()); + if (ids != null) { + Utilities.LOG14535.info("Observing " + currDesc.getTableName() + ": " + ids); + } + + Path[] dirs = HiveInputFormat.processPathsForMmRead(Lists.newArrayList(currPath), job, ids); + if (dirs == null || dirs.length == 0) { + return null; // No valid inputs. This condition is logged inside the call. 
+ } + StringBuffer str = new StringBuffer(StringUtils.escapeString(dirs[0].toString())); + for(int i = 1; i < dirs.length;i++) { + str.append(",").append(StringUtils.escapeString(dirs[i].toString())); + } + return str.toString(); + } + private FetchInputFormatSplit[] splitSampling(SplitSample splitSample, FetchInputFormatSplit[] splits) { long totalSize = 0; @@ -695,4 +727,8 @@ public RecordReader getRecordReader(JobConf job) t return inputFormat.getRecordReader(getInputSplit(), job, Reporter.NULL); } } + + public Configuration getJobConf() { + return job; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java index 8c7d99d07126..93c03fd0e6f3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java @@ -24,6 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.CommandNeedRetryException; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.DriverContext; @@ -193,4 +194,8 @@ public void clearFetch() throws HiveException { } } + public Configuration getFetchConf() { + return fetch.getJobConf(); + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index eeba6cdf33af..22b2149c74c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.Serializable; import java.io.StringWriter; @@ -33,15 +32,21 @@ import java.util.Set; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; 
+import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; +import org.apache.hadoop.hive.ql.exec.Utilities.MissingBucketsContext; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveKey; @@ -79,6 +84,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Writable; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hive.common.util.HiveStringUtils; import org.slf4j.Logger; @@ -89,6 +95,7 @@ /** * File Sink operator implementation. 
**/ +@SuppressWarnings("deprecation") public class FileSinkOperator extends TerminalOperator implements Serializable { @@ -105,7 +112,8 @@ public class FileSinkOperator extends TerminalOperator implements protected transient Path parent; protected transient HiveOutputFormat hiveOutputFormat; protected transient Path specPath; - protected transient String childSpecPathDynLinkedPartitions; + protected transient String unionPath; + protected transient boolean isUnionDp; protected transient int dpStartCol; // start column # for DP columns protected transient List dpVals; // array of values corresponding to DP columns protected transient List dpWritables; @@ -143,8 +151,8 @@ public static interface RecordWriter { } public class FSPaths implements Cloneable { - Path tmpPath; - Path taskOutputTempPath; + private Path tmpPath; + private Path taskOutputTempPath; Path[] outPaths; Path[] finalPaths; RecordWriter[] outWriters; @@ -152,10 +160,21 @@ public class FSPaths implements Cloneable { Stat stat; int acidLastBucket = -1; int acidFileOffset = -1; + private boolean isMmTable; + + public FSPaths(Path specPath, boolean isMmTable) { + this.isMmTable = isMmTable; + if (!isMmTable) { + tmpPath = Utilities.toTempPath(specPath); + taskOutputTempPath = Utilities.toTaskTempPath(specPath); + } else { + tmpPath = specPath; + taskOutputTempPath = null; // Should not be used. + } + Utilities.LOG14535.info("new FSPaths for " + numFiles + " files, dynParts = " + bDynParts + + ": tmpPath " + tmpPath + ", task path " + taskOutputTempPath + + " (spec path " + specPath + ")"/*, new Exception()*/); - public FSPaths(Path specPath) { - tmpPath = Utilities.toTempPath(specPath); - taskOutputTempPath = Utilities.toTaskTempPath(specPath); outPaths = new Path[numFiles]; finalPaths = new Path[numFiles]; outWriters = new RecordWriter[numFiles]; @@ -176,7 +195,7 @@ public Path getTaskOutPath(String taskId) { /** * Update the final paths according to tmpPath. 
*/ - public Path getFinalPath(String taskId, Path tmpPath, String extension) { + private Path getFinalPath(String taskId, Path tmpPath, String extension) { if (extension != null) { return new Path(tmpPath, taskId + extension); } else { @@ -206,34 +225,10 @@ public void closeWriters(boolean abort) throws HiveException { } } - private void commit(FileSystem fs) throws HiveException { + private void commit(FileSystem fs, List commitPaths) throws HiveException { for (int idx = 0; idx < outPaths.length; ++idx) { try { - if ((bDynParts || isSkewedStoredAsSubDirectories) - && !fs.exists(finalPaths[idx].getParent())) { - fs.mkdirs(finalPaths[idx].getParent()); - } - boolean needToRename = true; - if (conf.getWriteType() == AcidUtils.Operation.UPDATE || - conf.getWriteType() == AcidUtils.Operation.DELETE) { - // If we're updating or deleting there may be no file to close. This can happen - // because the where clause strained out all of the records for a given bucket. So - // before attempting the rename below, check if our file exists. If it doesn't, - // then skip the rename. If it does try it. We could just blindly try the rename - // and avoid the extra stat, but that would mask other errors. 
- try { - if (outPaths[idx] != null) { - FileStatus stat = fs.getFileStatus(outPaths[idx]); - } - } catch (FileNotFoundException fnfe) { - needToRename = false; - } - } - if (needToRename && outPaths[idx] != null && !fs.rename(outPaths[idx], finalPaths[idx])) { - throw new HiveException("Unable to rename output from: " + - outPaths[idx] + " to: " + finalPaths[idx]); - } - updateProgress(); + commitOneOutPath(idx, fs, commitPaths); } catch (IOException e) { throw new HiveException("Unable to rename output from: " + outPaths[idx] + " to: " + finalPaths[idx], e); @@ -241,6 +236,34 @@ private void commit(FileSystem fs) throws HiveException { } } + private void commitOneOutPath(int idx, FileSystem fs, List commitPaths) + throws IOException, HiveException { + if ((bDynParts || isSkewedStoredAsSubDirectories) + && !fs.exists(finalPaths[idx].getParent())) { + Utilities.LOG14535.info("commit making path for dyn/skew: " + finalPaths[idx].getParent()); + fs.mkdirs(finalPaths[idx].getParent()); + } + // If we're updating or deleting there may be no file to close. This can happen + // because the where clause strained out all of the records for a given bucket. So + // before attempting the rename below, check if our file exists. If it doesn't, + // then skip the rename. If it does try it. We could just blindly try the rename + // and avoid the extra stat, but that would mask other errors. 
+ boolean needToRename = (conf.getWriteType() != AcidUtils.Operation.UPDATE && + conf.getWriteType() != AcidUtils.Operation.DELETE) || fs.exists(outPaths[idx]); + if (needToRename && outPaths[idx] != null) { + Utilities.LOG14535.info("committing " + outPaths[idx] + " to " + finalPaths[idx] + " (" + isMmTable + ")"); + if (isMmTable) { + assert outPaths[idx].equals(finalPaths[idx]); + commitPaths.add(outPaths[idx]); + } else if (!fs.rename(outPaths[idx], finalPaths[idx])) { + throw new HiveException("Unable to rename output from: " + + outPaths[idx] + " to: " + finalPaths[idx]); + } + } + + updateProgress(); + } + public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws HiveException { for (int idx = 0; idx < outWriters.length; idx++) { if (outWriters[idx] != null) { @@ -260,6 +283,65 @@ public void abortWriters(FileSystem fs, boolean abort, boolean delete) throws Hi public Stat getStat() { return stat; } + + public void configureDynPartPath(String dirName, String childSpecPathDynLinkedPartitions) { + dirName = (childSpecPathDynLinkedPartitions == null) ? dirName : + dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions; + tmpPath = new Path(tmpPath, dirName); + if (taskOutputTempPath != null) { + taskOutputTempPath = new Path(taskOutputTempPath, dirName); + } + } + + public void initializeBucketPaths(int filesIdx, String taskId, boolean isNativeTable, + boolean isSkewedStoredAsSubDirectories) { + if (isNativeTable) { + String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat); + if (!isMmTable) { + if (!bDynParts && !isSkewedStoredAsSubDirectories) { + finalPaths[filesIdx] = getFinalPath(taskId, parent, extension); + } else { + finalPaths[filesIdx] = getFinalPath(taskId, tmpPath, extension); + } + outPaths[filesIdx] = getTaskOutPath(taskId); + } else { + String subdirPath = ValidWriteIds.getMmFilePrefix(conf.getMmWriteId()); + if (unionPath != null) { + // Create the union directory inside the MM directory. 
+ subdirPath += Path.SEPARATOR + unionPath; + } + subdirPath += Path.SEPARATOR + taskId; + if (conf.isMerge()) { + // Make sure we don't collide with the source files. + // MM tables don't support concat so we don't expect the merge of merged files. + subdirPath += ".merged"; + } + if (!bDynParts && !isSkewedStoredAsSubDirectories) { + finalPaths[filesIdx] = getFinalPath(subdirPath, specPath, extension); + } else { + // Note: tmpPath here has the correct partition key + finalPaths[filesIdx] = getFinalPath(subdirPath, tmpPath, extension); + } + outPaths[filesIdx] = finalPaths[filesIdx]; + } + if (isInfoEnabled) { + LOG.info("Final Path: FS " + finalPaths[filesIdx]); + if (isInfoEnabled && !isMmTable) { + LOG.info("Writing to temp file: FS " + outPaths[filesIdx]); + } + } + } else { + finalPaths[filesIdx] = outPaths[filesIdx] = specPath; + } + } + + public Path getTmpPath() { + return tmpPath; + } + + public Path getTaskOutputTempPath() { + return taskOutputTempPath; + } } // class FSPaths private static final long serialVersionUID = 1L; @@ -310,14 +392,25 @@ private void initializeSpecPath() { // and Parent/DynamicPartition/Child_1 respectively. // The movetask that follows subQ1 and subQ2 tasks still moves the directory // 'Parent' - if ((!conf.isLinkedFileSink()) || (dpCtx == null)) { + boolean isLinked = conf.isLinkedFileSink(); + if (!isLinked) { + // Simple case - no union. specPath = conf.getDirName(); - childSpecPathDynLinkedPartitions = null; - return; + unionPath = null; + } else { + isUnionDp = (dpCtx != null); + if (conf.isMmTable() || isUnionDp) { + // MM tables need custom handling for union suffix; DP tables use parent too. + specPath = conf.getParentDir(); + unionPath = conf.getDirName().getName(); + } else { + // For now, keep the old logic for non-MM non-DP union case. Should probably be unified. 
+ specPath = conf.getDirName(); + unionPath = null; + } } - - specPath = conf.getParentDir(); - childSpecPathDynLinkedPartitions = conf.getDirName().getName(); + Utilities.LOG14535.info("Setting up FSOP " + System.identityHashCode(this) + " (" + + conf.isLinkedFileSink() + ") with " + taskId + " and " + specPath + " + " + unionPath); } /** Kryo ctor. */ @@ -397,7 +490,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { } if (!bDynParts) { - fsp = new FSPaths(specPath); + fsp = new FSPaths(specPath, conf.isMmTable()); // Create all the files - this is required because empty files need to be created for // empty buckets @@ -411,6 +504,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { .getVar(hconf, HIVE_TEMPORARY_TABLE_STORAGE)); if (isTemporary && fsp != null && tmpStorage != StoragePolicyValue.DEFAULT) { + assert !conf.isMmTable(); // Not supported for temp tables. final Path outputPath = fsp.taskOutputTempPath; StoragePolicyShim shim = ShimLoader.getHadoopShims() .getStoragePolicyShim(fs); @@ -557,7 +651,7 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { assert filesIdx == numFiles; // in recent hadoop versions, use deleteOnExit to clean tmp files. 
- if (isNativeTable && fs != null && fsp != null) { + if (isNativeTable && fs != null && fsp != null && !conf.isMmTable()) { autoDelete = fs.deleteOnExit(fsp.outPaths[0]); } } catch (Exception e) { @@ -571,34 +665,16 @@ protected void createBucketFiles(FSPaths fsp) throws HiveException { protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) throws HiveException { try { - if (isNativeTable) { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, null); - if (isInfoEnabled) { - LOG.info("Final Path: FS " + fsp.finalPaths[filesIdx]); - } - fsp.outPaths[filesIdx] = fsp.getTaskOutPath(taskId); - if (isInfoEnabled) { - LOG.info("Writing to temp file: FS " + fsp.outPaths[filesIdx]); - } - } else { - fsp.finalPaths[filesIdx] = fsp.outPaths[filesIdx] = specPath; - } - // The reason to keep these instead of using - // OutputFormat.getRecordWriter() is that - // getRecordWriter does not give us enough control over the file name that - // we create. - String extension = Utilities.getFileExtension(jc, isCompressed, hiveOutputFormat); - if (!bDynParts && !this.isSkewedStoredAsSubDirectories) { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, parent, extension); - } else { - fsp.finalPaths[filesIdx] = fsp.getFinalPath(taskId, fsp.tmpPath, extension); - } + fsp.initializeBucketPaths(filesIdx, taskId, isNativeTable, isSkewedStoredAsSubDirectories); + Utilities.LOG14535.info("createBucketForFileIdx " + filesIdx + ": final path " + fsp.finalPaths[filesIdx] + + "; out path " + fsp.outPaths[filesIdx] +" (spec path " + specPath + ", tmp path " + + fsp.getTmpPath() + ", task " + taskId + ")"/*, new Exception()*/); if (isInfoEnabled) { LOG.info("New Final Path: FS " + fsp.finalPaths[filesIdx]); } - if (isNativeTable) { + if (isNativeTable && !conf.isMmTable()) { // in recent hadoop versions, use deleteOnExit to clean tmp files. 
autoDelete = fs.deleteOnExit(fsp.outPaths[filesIdx]); } @@ -606,7 +682,8 @@ protected void createBucketForFileIdx(FSPaths fsp, int filesIdx) Utilities.copyTableJobPropertiesToConf(conf.getTableInfo(), jc); // only create bucket files only if no dynamic partitions, // buckets of dynamic partitions will be created for each newly created partition - if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || + conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) { fsp.outWriters[filesIdx] = HiveFileFormatUtils.getHiveRecordWriter(jc, conf.getTableInfo(), outputClass, conf, fsp.outPaths[filesIdx], reporter); // If the record writer provides stats, get it from there instead of the serde @@ -749,7 +826,8 @@ public void process(Object row, int tag) throws HiveException { // for a given operator branch prediction should work quite nicely on it. // RecordUpdateer expects to get the actual row, not a serialized version of it. Thus we // pass the row rather than recordValue. 
- if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || + conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) { rowOutWriters[writerOffset].write(recordValue); } else if (conf.getWriteType() == AcidUtils.Operation.INSERT) { fpaths.updaters[writerOffset].insert(conf.getTransactionId(), row); @@ -793,7 +871,8 @@ public void process(Object row, int tag) throws HiveException { protected boolean areAllTrue(boolean[] statsFromRW) { // If we are doing an acid operation they will always all be true as RecordUpdaters always // collect stats - if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() != AcidUtils.Operation.NOT_ACID && + conf.getWriteType() != AcidUtils.Operation.INSERT_ONLY) { return true; } for(boolean b : statsFromRW) { @@ -829,6 +908,7 @@ private int findWriterOffset(Object row) throws HiveException { protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveException { FSPaths fsp2 = valToPaths.get(lbDirName); if (fsp2 == null) { + Utilities.LOG14535.info("lookupListBucketingPaths for " + lbDirName); fsp2 = createNewPaths(lbDirName); } return fsp2; @@ -842,18 +922,10 @@ protected FSPaths lookupListBucketingPaths(String lbDirName) throws HiveExceptio * @throws HiveException */ private FSPaths createNewPaths(String dirName) throws HiveException { - FSPaths fsp2 = new FSPaths(specPath); - if (childSpecPathDynLinkedPartitions != null) { - fsp2.tmpPath = new Path(fsp2.tmpPath, - dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions); - fsp2.taskOutputTempPath = - new Path(fsp2.taskOutputTempPath, - dirName + Path.SEPARATOR + childSpecPathDynLinkedPartitions); - } else { - fsp2.tmpPath = new Path(fsp2.tmpPath, dirName); - fsp2.taskOutputTempPath = - new Path(fsp2.taskOutputTempPath, dirName); - } + FSPaths fsp2 = new FSPaths(specPath, conf.isMmTable()); + fsp2.configureDynPartPath(dirName, !conf.isMmTable() && isUnionDp ? 
unionPath : null); + Utilities.LOG14535.info("creating new paths for " + dirName + ", childSpec " + unionPath + + ": tmpPath " + fsp2.getTmpPath() + ", task path " + fsp2.getTaskOutputTempPath()/*, new Exception()*/); if(!conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { createBucketFiles(fsp2); valToPaths.put(dirName, fsp2); @@ -943,7 +1015,8 @@ protected FSPaths getDynOutPaths(List row, String lbDirName) throws Hive // stats from the record writer and store in the previous fsp that is cached if (conf.isGatherStats() && isCollectRWStats) { SerDeStats stats = null; - if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || + conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) { RecordWriter outWriter = prevFsp.outWriters[0]; if (outWriter != null) { stats = ((StatsProvidingRecordWriter) outWriter).getStats(); @@ -1036,6 +1109,7 @@ public void closeOp(boolean abort) throws HiveException { throw new HiveException(e); } } + List commitPaths = new ArrayList<>(); for (FSPaths fsp : valToPaths.values()) { fsp.closeWriters(abort); // before closing the operator check if statistics gathering is requested @@ -1046,7 +1120,8 @@ public void closeOp(boolean abort) throws HiveException { // record writer already gathers the statistics, it can simply return the // accumulated statistics which will be aggregated in case of spray writers if (conf.isGatherStats() && isCollectRWStats) { - if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID) { + if (conf.getWriteType() == AcidUtils.Operation.NOT_ACID || + conf.getWriteType() == AcidUtils.Operation.INSERT_ONLY) { for (int idx = 0; idx < fsp.outWriters.length; idx++) { RecordWriter outWriter = fsp.outWriters[idx]; if (outWriter != null) { @@ -1071,9 +1146,13 @@ public void closeOp(boolean abort) throws HiveException { } if (isNativeTable) { - fsp.commit(fs); + fsp.commit(fs, commitPaths); } } + if (conf.getMmWriteId() != null) { + 
Utilities.writeMmCommitManifest( + commitPaths, specPath, fs, taskId, conf.getMmWriteId(), unionPath); + } // Only publish stats if this operator's flag was set to gather stats if (conf.isGatherStats()) { publishStats(); @@ -1083,13 +1162,14 @@ public void closeOp(boolean abort) throws HiveException { // Hadoop always call close() even if an Exception was thrown in map() or // reduce(). for (FSPaths fsp : valToPaths.values()) { - fsp.abortWriters(fs, abort, !autoDelete && isNativeTable); + fsp.abortWriters(fs, abort, !autoDelete && isNativeTable && !conf.isMmTable()); } } fsp = prevFsp = null; super.closeOp(abort); } + /** * @return the name of the operator */ @@ -1108,12 +1188,27 @@ public void jobCloseOp(Configuration hconf, boolean success) try { if ((conf != null) && isNativeTable) { Path specPath = conf.getDirName(); + String unionSuffix = null; DynamicPartitionCtx dpCtx = conf.getDynPartCtx(); - if (conf.isLinkedFileSink() && (dpCtx != null)) { + ListBucketingCtx lbCtx = conf.getLbCtx(); + if (conf.isLinkedFileSink() && (dpCtx != null || conf.isMmTable())) { specPath = conf.getParentDir(); + unionSuffix = conf.getDirName().getName(); + } + Utilities.LOG14535.info("jobCloseOp using specPath " + specPath); + if (!conf.isMmTable()) { + Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, reporter); + } else { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + lbLevels = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel(); + // TODO: why is it stored in both? + int numBuckets = (conf.getTable() != null) ? conf.getTable().getNumBuckets() + : (dpCtx != null ? 
dpCtx.getNumBuckets() : 0); + MissingBucketsContext mbc = new MissingBucketsContext( + conf.getTableInfo(), numBuckets, conf.getCompressed()); + Utilities.handleMmTableFinalPath(specPath, unionSuffix, hconf, success, + dpLevels, lbLevels, mbc, conf.getMmWriteId(), reporter); } - Utilities.mvFileToFinalPath(specPath, hconf, success, LOG, dpCtx, conf, - reporter); } } catch (IOException e) { throw new HiveException(e); @@ -1195,7 +1290,6 @@ private void publishStats() throws HiveException { for (Map.Entry entry : valToPaths.entrySet()) { String fspKey = entry.getKey(); // DP/LB FSPaths fspValue = entry.getValue(); - // for bucketed tables, hive.optimize.sort.dynamic.partition optimization // adds the taskId to the fspKey. if (conf.getDpSortState().equals(DPSortState.PARTITION_BUCKET_SORTED)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java new file mode 100644 index 000000000000..efa9bc37dca3 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitTask.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.ql.DriverContext; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Table; +import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; +import org.apache.hadoop.hive.ql.plan.api.StageType; +import org.apache.hadoop.util.StringUtils; + +public class ImportCommitTask extends Task { + + private static final long serialVersionUID = 1L; + + public ImportCommitTask() { + super(); + } + + @Override + public int execute(DriverContext driverContext) { + Utilities.LOG14535.info("Executing ImportCommit for " + work.getMmWriteId()); + + try { + if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { + return 0; + } + Hive db = getHive(); + Table tbl = db.getTable(work.getDbName(), work.getTblName()); + db.commitMmTableWrite(tbl, work.getMmWriteId()); + return 0; + } catch (Exception e) { + console.printError("Failed with exception " + e.getMessage(), "\n" + + StringUtils.stringifyException(e)); + setException(e); + return 1; + } + } + + @Override + public StageType getType() { + return StageType.MOVE; // The commit for import is normally done as part of MoveTask. + } + + @Override + public String getName() { + return "IMPORT_COMMIT"; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java new file mode 100644 index 000000000000..f62d23718dc8 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ImportCommitWork.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.exec; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.Explain.Level; + +@Explain(displayName = "Import Commit", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) +public class ImportCommitWork implements Serializable { + private static final long serialVersionUID = 1L; + private String dbName, tblName; + private long mmWriteId; + + public ImportCommitWork(String dbName, String tblName, long mmWriteId) { + this.mmWriteId = mmWriteId; + this.dbName = dbName; + this.tblName = tblName; + } + + public long getMmWriteId() { + return mmWriteId; + } + + public String getDbName() { + return dbName; + } + + public String getTblName() { + return tblName; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java index 82056c495013..fa25ba416e61 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java @@ -231,6 +231,7 @@ private void mvFileToFinalPath(Path specPath, Configuration hconf, // point, updates from speculative tasks still writing to tmpPath // will not appear in finalPath. 
log.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath); + Utilities.LOG14535.info("Moving tmp dir: " + tmpPath + " to: " + intermediatePath + "(spec " + specPath + ")"); Utilities.rename(fs, tmpPath, intermediatePath); // Step2: remove any tmp file or double-committed output files Utilities.removeTempOrDuplicateFiles(fs, intermediatePath); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 8265af4231cb..6e3ba986d995 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.InvalidOperationException; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.model.MMasterKey; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.exec.mr.MapRedTask; @@ -241,8 +242,23 @@ public boolean hasFollowingStatsTask() { return false; } + private final static class TaskInformation { + public List bucketCols = null; + public List sortCols = null; + public int numBuckets = -1; + public Task task; + public String path; + public TaskInformation(Task task, String path) { + this.task = task; + this.path = path; + } + } + @Override public int execute(DriverContext driverContext) { + Utilities.LOG14535.info("Executing MoveWork " + System.identityHashCode(work) + + " with " + work.getLoadFileWork() + "; " + work.getLoadTableWork() + "; " + + work.getLoadMultiFilesWork()); try { if (driverContext.getCtx().getExplainAnalyze() == AnalyzeState.RUNNING) { @@ -256,6 +272,7 @@ public int execute(DriverContext driverContext) { if (lfd != null) { Path targetPath = lfd.getTargetDir(); Path sourcePath = lfd.getSourcePath(); + Utilities.LOG14535.info("MoveTask moving LFD " + sourcePath + " to " + targetPath); 
moveFile(sourcePath, targetPath, lfd.getIsDfsDir()); } @@ -272,6 +289,7 @@ public int execute(DriverContext driverContext) { if (!fs.exists(destPath.getParent())) { fs.mkdirs(destPath.getParent()); } + Utilities.LOG14535.info("MoveTask moving LMFD " + srcPath + " to " + destPath); moveFile(srcPath, destPath, isDfsDir); i++; } @@ -292,75 +310,29 @@ public int execute(DriverContext driverContext) { mesg.append(')'); } String mesg_detail = " from " + tbd.getSourcePath(); + Utilities.LOG14535.info("" + mesg.toString() + " " + mesg_detail); console.printInfo(mesg.toString(), mesg_detail); Table table = db.getTable(tbd.getTable().getTableName()); - if (work.getCheckFileFormat()) { - // Get all files from the src directory - FileStatus[] dirs; - ArrayList files; - FileSystem srcFs; // source filesystem - try { - srcFs = tbd.getSourcePath().getFileSystem(conf); - dirs = srcFs.globStatus(tbd.getSourcePath()); - files = new ArrayList(); - for (int i = 0; (dirs != null && i < dirs.length); i++) { - files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER))); - // We only check one file, so exit the loop when we have at least - // one. - if (files.size() > 0) { - break; - } - } - } catch (IOException e) { - throw new HiveException( - "addFiles: filesystem error in check phase", e); - } + checkFileFormats(db, tbd, table); - // handle file format check for table level - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { - boolean flag = true; - // work.checkFileFormat is set to true only for Load Task, so assumption here is - // dynamic partition context is null - if (tbd.getDPCtx() == null) { - if (tbd.getPartitionSpec() == null || tbd.getPartitionSpec().isEmpty()) { - // Check if the file format of the file matches that of the table. 
- flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); - } else { - // Check if the file format of the file matches that of the partition - Partition oldPart = db.getPartition(table, tbd.getPartitionSpec(), false); - if (oldPart == null) { - // this means we have just created a table and are specifying partition in the - // load statement (without pre-creating the partition), in which case lets use - // table input format class. inheritTableSpecs defaults to true so when a new - // partition is created later it will automatically inherit input format - // from table object - flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); - } else { - flag = HiveFileFormatUtils.checkInputFormat( - srcFs, conf, oldPart.getInputFormatClass(), files); - } - } - if (!flag) { - throw new HiveException( - "Wrong file format. Please check the file's format."); - } - } else { - LOG.warn("Skipping file format check as dpCtx is not null"); - } - } + boolean isAcid = work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID; + if (tbd.isMmTable() && isAcid) { + throw new HiveException("ACID and MM are not supported"); } // Create a data container DataContainer dc = null; if (tbd.getPartitionSpec().size() == 0) { dc = new DataContainer(table.getTTable()); + Utilities.LOG14535.info("loadTable called from " + tbd.getSourcePath() + " into " + tbd.getTable().getTableName()); + if (tbd.isMmTable() && !tbd.isCommitMmWrite()) { + throw new HiveException( + "Only single-partition LoadTableDesc can skip committing write ID"); + } db.loadTable(tbd.getSourcePath(), tbd.getTable().getTableName(), tbd.getReplace(), - work.isSrcLocal(), isSkewedStoredAsDirs(tbd), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, - hasFollowingStatsTask()); + work.isSrcLocal(), isSkewedStoredAsDirs(tbd), isAcid, hasFollowingStatsTask(), + tbd.getMmWriteId()); + if
(work.getOutputs() != null) { DDLTask.addIfAbsentByName(new WriteEntity(table, getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs()); @@ -369,154 +341,15 @@ public int execute(DriverContext driverContext) { LOG.info("Partition is: " + tbd.getPartitionSpec().toString()); // Check if the bucketing and/or sorting columns were inferred - List bucketCols = null; - List sortCols = null; - int numBuckets = -1; - Task task = this; - String path = tbd.getSourcePath().toUri().toString(); - // Find the first ancestor of this MoveTask which is some form of map reduce task - // (Either standard, local, or a merge) - while (task.getParentTasks() != null && task.getParentTasks().size() == 1) { - task = (Task)task.getParentTasks().get(0); - // If it was a merge task or a local map reduce task, nothing can be inferred - if (task instanceof MergeFileTask || task instanceof MapredLocalTask) { - break; - } - - // If it's a standard map reduce task, check what, if anything, it inferred about - // the directory this move task is moving - if (task instanceof MapRedTask) { - MapredWork work = (MapredWork)task.getWork(); - MapWork mapWork = work.getMapWork(); - bucketCols = mapWork.getBucketedColsByDirectory().get(path); - sortCols = mapWork.getSortedColsByDirectory().get(path); - if (work.getReduceWork() != null) { - numBuckets = work.getReduceWork().getNumReduceTasks(); - } - - if (bucketCols != null || sortCols != null) { - // This must be a final map reduce task (the task containing the file sink - // operator that writes the final output) - assert work.isFinalMapRed(); - } - break; - } - - // If it's a move task, get the path the files were moved from, this is what any - // preceding map reduce task inferred information about, and moving does not invalidate - // those assumptions - // This can happen when a conditional merge is added before the final MoveTask, but the - // condition for merging is not met, see GenMRFileSink1. 
- if (task instanceof MoveTask) { - if (((MoveTask)task).getWork().getLoadFileWork() != null) { - path = ((MoveTask)task).getWork().getLoadFileWork().getSourcePath().toUri().toString(); - } - } - } + TaskInformation ti = new TaskInformation(this, tbd.getSourcePath().toUri().toString()); + inferTaskInformation(ti); // deal with dynamic partitions DynamicPartitionCtx dpCtx = tbd.getDPCtx(); if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions - - List> dps = Utilities.getFullDPSpecs(conf, dpCtx); - - // publish DP columns to its subscribers - if (dps != null && dps.size() > 0) { - pushFeed(FeedType.DYNAMIC_PARTITIONS, dps); - } - console.printInfo(System.getProperty("line.separator")); - long startTime = System.currentTimeMillis(); - // load the list of DP partitions and return the list of partition specs - // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions - // to use Utilities.getFullDPSpecs() to get the list of full partSpecs. - // After that check the number of DPs created to not exceed the limit and - // iterate over it and call loadPartition() here. - // The reason we don't do inside HIVE-1361 is the latter is large and we - // want to isolate any potential issue it may introduce. - Map, Partition> dp = - db.loadDynamicPartitions( - tbd.getSourcePath(), - tbd.getTable().getTableName(), - tbd.getPartitionSpec(), - tbd.getReplace(), - dpCtx.getNumDPCols(), - isSkewedStoredAsDirs(tbd), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, - SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(), - work.getLoadTableWork().getWriteType()); - - String loadTime = "\t Time taken to load dynamic partitions: " + - (System.currentTimeMillis() - startTime)/1000.0 + " seconds"; - console.printInfo(loadTime); - LOG.info(loadTime); - - if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) { - throw new HiveException("This query creates no partitions." 
+ - " To turn off this error, set hive.error.on.empty.partition=false."); - } - - startTime = System.currentTimeMillis(); - // for each partition spec, get the partition - // and put it to WriteEntity for post-exec hook - for(Map.Entry, Partition> entry : dp.entrySet()) { - Partition partn = entry.getValue(); - - if (bucketCols != null || sortCols != null) { - updatePartitionBucketSortColumns( - db, table, partn, bucketCols, numBuckets, sortCols); - } - - WriteEntity enty = new WriteEntity(partn, - getWriteType(tbd, work.getLoadTableWork().getWriteType())); - if (work.getOutputs() != null) { - DDLTask.addIfAbsentByName(enty, work.getOutputs()); - } - // Need to update the queryPlan's output as well so that post-exec hook get executed. - // This is only needed for dynamic partitioning since for SP the the WriteEntity is - // constructed at compile time and the queryPlan already contains that. - // For DP, WriteEntity creation is deferred at this stage so we need to update - // queryPlan here. - if (queryPlan.getOutputs() == null) { - queryPlan.setOutputs(new LinkedHashSet()); - } - queryPlan.getOutputs().add(enty); - - // update columnar lineage for each partition - dc = new DataContainer(table.getTTable(), partn.getTPartition()); - - // Don't set lineage on delete as we don't have all the columns - if (SessionState.get() != null && - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE && - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) { - SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc, - table.getCols()); - } - LOG.info("\tLoading partition " + entry.getKey()); - } - console.printInfo("\t Time taken for adding to write entity : " + - (System.currentTimeMillis() - startTime)/1000.0 + " seconds"); - dc = null; // reset data container to prevent it being added again. 
+ dc = handleDynParts(db, table, tbd, ti, dpCtx); } else { // static partitions - List partVals = MetaStoreUtils.getPvals(table.getPartCols(), - tbd.getPartitionSpec()); - db.validatePartitionNameCharacters(partVals); - db.loadPartition(tbd.getSourcePath(), tbd.getTable().getTableName(), - tbd.getPartitionSpec(), tbd.getReplace(), - tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), - work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, hasFollowingStatsTask()); - Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); - - if (bucketCols != null || sortCols != null) { - updatePartitionBucketSortColumns(db, table, partn, bucketCols, - numBuckets, sortCols); - } - - dc = new DataContainer(table.getTTable(), partn.getTPartition()); - // add this partition to post-execution hook - if (work.getOutputs() != null) { - DDLTask.addIfAbsentByName(new WriteEntity(partn, - getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs()); - } - } + dc = handleStaticParts(db, table, tbd, ti); + } } if (SessionState.get() != null && dc != null) { // If we are doing an update or a delete the number of columns in the table will not @@ -549,6 +382,230 @@ public int execute(DriverContext driverContext) { return (1); } } + + private DataContainer handleStaticParts(Hive db, Table table, LoadTableDesc tbd, + TaskInformation ti) throws HiveException, IOException, InvalidOperationException { + List partVals = MetaStoreUtils.getPvals(table.getPartCols(), tbd.getPartitionSpec()); + db.validatePartitionNameCharacters(partVals); + Utilities.LOG14535.info("loadPartition called from " + tbd.getSourcePath() + + " into " + tbd.getTable().getTableName()); + boolean isCommitMmWrite = tbd.isCommitMmWrite(); + db.loadSinglePartition(tbd.getSourcePath(), tbd.getTable().getTableName(), + tbd.getPartitionSpec(), tbd.getReplace(), + tbd.getInheritTableSpecs(), isSkewedStoredAsDirs(tbd), work.isSrcLocal(), + 
(work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.INSERT_ONLY), + hasFollowingStatsTask(), tbd.getMmWriteId(), isCommitMmWrite); + Partition partn = db.getPartition(table, tbd.getPartitionSpec(), false); + + if (ti.bucketCols != null || ti.sortCols != null) { + updatePartitionBucketSortColumns(db, table, partn, ti.bucketCols, + ti.numBuckets, ti.sortCols); + } + + DataContainer dc = new DataContainer(table.getTTable(), partn.getTPartition()); + // add this partition to post-execution hook + if (work.getOutputs() != null) { + DDLTask.addIfAbsentByName(new WriteEntity(partn, + getWriteType(tbd, work.getLoadTableWork().getWriteType())), work.getOutputs()); + } + return dc; + } + + private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd, + TaskInformation ti, DynamicPartitionCtx dpCtx) throws HiveException, + IOException, InvalidOperationException { + DataContainer dc; + List> dps = Utilities.getFullDPSpecs(conf, dpCtx); + + // publish DP columns to its subscribers + if (dps != null && dps.size() > 0) { + pushFeed(FeedType.DYNAMIC_PARTITIONS, dps); + } + console.printInfo(System.getProperty("line.separator")); + long startTime = System.currentTimeMillis(); + // load the list of DP partitions and return the list of partition specs + // TODO: In a follow-up to HIVE-1361, we should refactor loadDynamicPartitions + // to use Utilities.getFullDPSpecs() to get the list of full partSpecs. + // After that check the number of DPs created to not exceed the limit and + // iterate over it and call loadPartition() here. + // The reason we don't do inside HIVE-1361 is the latter is large and we + // want to isolate any potential issue it may introduce. 
+ if (tbd.isMmTable() && !tbd.isCommitMmWrite()) { + throw new HiveException("Only single-partition LoadTableDesc can skip committing write ID"); + } + Map, Partition> dp = + db.loadDynamicPartitions( + tbd.getSourcePath(), + tbd.getTable().getTableName(), + tbd.getPartitionSpec(), + tbd.getReplace(), + dpCtx.getNumDPCols(), + (tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(), + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID, + SessionState.get().getTxnMgr().getCurrentTxnId(), hasFollowingStatsTask(), + work.getLoadTableWork().getWriteType(), + tbd.getMmWriteId()); + + String loadTime = "\t Time taken to load dynamic partitions: " + + (System.currentTimeMillis() - startTime)/1000.0 + " seconds"; + console.printInfo(loadTime); + LOG.info(loadTime); + + if (dp.size() == 0 && conf.getBoolVar(HiveConf.ConfVars.HIVE_ERROR_ON_EMPTY_PARTITION)) { + throw new HiveException("This query creates no partitions." + + " To turn off this error, set hive.error.on.empty.partition=false."); + } + + startTime = System.currentTimeMillis(); + // for each partition spec, get the partition + // and put it to WriteEntity for post-exec hook + for(Map.Entry, Partition> entry : dp.entrySet()) { + Partition partn = entry.getValue(); + + if (ti.bucketCols != null || ti.sortCols != null) { + updatePartitionBucketSortColumns( + db, table, partn, ti.bucketCols, ti.numBuckets, ti.sortCols); + } + + WriteEntity enty = new WriteEntity(partn, + getWriteType(tbd, work.getLoadTableWork().getWriteType())); + if (work.getOutputs() != null) { + DDLTask.addIfAbsentByName(enty, work.getOutputs()); + } + // Need to update the queryPlan's output as well so that post-exec hook gets executed. + // This is only needed for dynamic partitioning since for SP the WriteEntity is + // constructed at compile time and the queryPlan already contains that. + // For DP, WriteEntity creation is deferred at this stage so we need to update + // queryPlan here.
+ if (queryPlan.getOutputs() == null) { + queryPlan.setOutputs(new LinkedHashSet()); + } + queryPlan.getOutputs().add(enty); + + // update columnar lineage for each partition + dc = new DataContainer(table.getTTable(), partn.getTPartition()); + + // Don't set lineage on delete as we don't have all the columns + if (SessionState.get() != null && + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.DELETE && + work.getLoadTableWork().getWriteType() != AcidUtils.Operation.UPDATE) { + SessionState.get().getLineageState().setLineage(tbd.getSourcePath(), dc, + table.getCols()); + } + LOG.info("\tLoading partition " + entry.getKey()); + } + console.printInfo("\t Time taken for adding to write entity : " + + (System.currentTimeMillis() - startTime)/1000.0 + " seconds"); + dc = null; // reset data container to prevent it being added again. + return dc; + } + + private void inferTaskInformation(TaskInformation ti) { + // Find the first ancestor of this MoveTask which is some form of map reduce task + // (Either standard, local, or a merge) + while (ti.task.getParentTasks() != null && ti.task.getParentTasks().size() == 1) { + ti.task = (Task)ti.task.getParentTasks().get(0); + // If it was a merge task or a local map reduce task, nothing can be inferred + if (ti.task instanceof MergeFileTask || ti.task instanceof MapredLocalTask) { + break; + } + + // If it's a standard map reduce task, check what, if anything, it inferred about + // the directory this move task is moving + if (ti.task instanceof MapRedTask) { + MapredWork work = (MapredWork)ti.task.getWork(); + MapWork mapWork = work.getMapWork(); + ti.bucketCols = mapWork.getBucketedColsByDirectory().get(ti.path); + ti.sortCols = mapWork.getSortedColsByDirectory().get(ti.path); + if (work.getReduceWork() != null) { + ti.numBuckets = work.getReduceWork().getNumReduceTasks(); + } + + if (ti.bucketCols != null || ti.sortCols != null) { + // This must be a final map reduce task (the task containing the file sink + // 
operator that writes the final output) + assert work.isFinalMapRed(); + } + break; + } + + // If it's a move task, get the path the files were moved from, this is what any + // preceding map reduce task inferred information about, and moving does not invalidate + // those assumptions + // This can happen when a conditional merge is added before the final MoveTask, but the + // condition for merging is not met, see GenMRFileSink1. + if (ti.task instanceof MoveTask) { + MoveTask mt = (MoveTask)ti.task; + if (mt.getWork().getLoadFileWork() != null) { + ti.path = mt.getWork().getLoadFileWork().getSourcePath().toUri().toString(); + } + } + } + } + + private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) + throws HiveException { + if (work.getCheckFileFormat()) { + // Get all files from the src directory + FileStatus[] dirs; + ArrayList files; + FileSystem srcFs; // source filesystem + try { + srcFs = tbd.getSourcePath().getFileSystem(conf); + dirs = srcFs.globStatus(tbd.getSourcePath()); + files = new ArrayList(); + for (int i = 0; (dirs != null && i < dirs.length); i++) { + files.addAll(Arrays.asList(srcFs.listStatus(dirs[i].getPath(), FileUtils.HIDDEN_FILES_PATH_FILTER))); + // We only check one file, so exit the loop when we have at least + // one. + if (files.size() > 0) { + break; + } + } + } catch (IOException e) { + throw new HiveException( + "addFiles: filesystem error in check phase", e); + } + + // handle file format check for table level + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + boolean flag = true; + // work.checkFileFormat is set to true only for Load Task, so assumption here is + // dynamic partition context is null + if (tbd.getDPCtx() == null) { + if (tbd.getPartitionSpec() == null || tbd.getPartitionSpec().isEmpty()) { + // Check if the file format of the file matches that of the table. 
+ flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); + } else { + // Check if the file format of the file matches that of the partition + Partition oldPart = db.getPartition(table, tbd.getPartitionSpec(), false); + if (oldPart == null) { + // this means we have just created a table and are specifying partition in the + // load statement (without pre-creating the partition), in which case lets use + // table input format class. inheritTableSpecs defaults to true so when a new + // partition is created later it will automatically inherit input format + // from table object + flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, tbd.getTable().getInputFileFormatClass(), files); + } else { + flag = HiveFileFormatUtils.checkInputFormat( + srcFs, conf, oldPart.getInputFormatClass(), files); + } + } + if (!flag) { + throw new HiveException( + "Wrong file format. Please check the file's format."); + } + } else { + LOG.warn("Skipping file format check as dpCtx is not null"); + } + } + } + } + + /** * so to make sure we crate WriteEntity with the right WriteType. This is (at this point) only * for consistency since LockManager (which is the only thing that pays attention to WriteType) @@ -567,6 +624,7 @@ WriteEntity.WriteType getWriteType(LoadTableDesc tbd, AcidUtils.Operation operat return WriteEntity.WriteType.INSERT; } } + private boolean isSkewedStoredAsDirs(LoadTableDesc tbd) { return (tbd.getLbCtx() == null) ? 
false : tbd.getLbCtx() .isSkewedStoredAsDir(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java index e3cb765e0a82..a845b5022749 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/OrcFileMergeOperator.java @@ -75,6 +75,7 @@ public void process(Object row, int tag) throws HiveException { private void processKeyValuePairs(Object key, Object value) throws HiveException { String filePath = ""; + boolean exception = false; try { OrcFileValueWrapper v; OrcFileKeyWrapper k; @@ -87,12 +88,15 @@ private void processKeyValuePairs(Object key, Object value) // skip incompatible file, files that are missing stripe statistics are set to incompatible if (k.isIncompatFile()) { LOG.warn("Incompatible ORC file merge! Stripe statistics is missing. " + k.getInputPath()); - incompatFileSet.add(k.getInputPath()); + addIncompatibleFile(k.getInputPath()); return; } filePath = k.getInputPath().toUri().getPath(); + Utilities.LOG14535.info("OrcFileMergeOperator processing " + filePath); + + fixTmpPath(k.getInputPath().getParent()); v = (OrcFileValueWrapper) value; @@ -126,6 +130,7 @@ private void processKeyValuePairs(Object key, Object value) options.bufferSize(compressBuffSize).enforceBufferSize(); } + Path outPath = getOutPath(); outWriter = OrcFile.createWriter(outPath, options); if (isLogDebugEnabled) { LOG.info("ORC merge file output path: " + outPath); @@ -133,7 +138,7 @@ private void processKeyValuePairs(Object key, Object value) } if (!checkCompatibility(k)) { - incompatFileSet.add(k.getInputPath()); + addIncompatibleFile(k.getInputPath()); return; } @@ -164,7 +169,7 @@ private void processKeyValuePairs(Object key, Object value) outWriter.appendUserMetadata(v.getUserMetadata()); } } catch (Throwable e) { - this.exception = true; + exception = true; LOG.error("Closing operator..Exception: " + 
ExceptionUtils.getStackTrace(e)); throw new HiveException(e); } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java index 4dea1d20cf57..349b459f418c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/RCFileMergeOperator.java @@ -77,7 +77,7 @@ private void processKeyValuePairs(Object k, Object v) codec = key.getCodec(); columnNumber = key.getKeyBuffer().getColumnNumber(); RCFileOutputFormat.setColumnNumber(jc, columnNumber); - outWriter = new RCFile.Writer(fs, jc, outPath, null, codec); + outWriter = new RCFile.Writer(fs, jc, getOutPath(), null, codec); } boolean sameCodec = ((codec == key.getCodec()) || codec.getClass().equals( @@ -94,7 +94,6 @@ private void processKeyValuePairs(Object k, Object v) key.getRecordLength(), key.getKeyLength(), key.getCompressedKeyLength()); } catch (Throwable e) { - this.exception = true; closeOp(true); throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java index 14fd61a99e62..1cd20e392a02 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java @@ -100,6 +100,8 @@ public TaskTuple(Class workClass, Class> taskClass) { MergeFileTask.class)); taskvec.add(new TaskTuple(DependencyCollectionWork.class, DependencyCollectionTask.class)); + taskvec.add(new TaskTuple(ImportCommitWork.class, + ImportCommitTask.class)); taskvec.add(new TaskTuple(PartialScanWork.class, PartialScanTask.class)); taskvec.add(new TaskTuple(IndexMetadataChangeWork.class, @@ -145,6 +147,7 @@ public static Task get(Class workClass, throw new RuntimeException("No task for work class " + workClass.getName()); } + @SafeVarargs public static Task get(T work, HiveConf conf, Task... 
tasklist) { Task ret = get((Class) work.getClass(), conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index a9dbc3e6b214..6774d4d160d1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -35,7 +35,6 @@ import java.io.OutputStream; import java.io.Serializable; import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; import java.net.URLClassLoader; import java.net.URLDecoder; @@ -50,7 +49,6 @@ import java.util.Arrays; import java.util.Calendar; import java.util.Collection; -import java.util.Collections; import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; @@ -83,10 +81,14 @@ import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.HiveInterruptCallback; @@ -94,6 +96,7 @@ import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.JavaUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreUtils; @@ -123,6 +126,7 @@ import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import 
org.apache.hadoop.hive.ql.io.IOConstants; +import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.OneNullRowInputFormat; import org.apache.hadoop.hive.ql.io.RCFile; import org.apache.hadoop.hive.ql.io.ReworkMapredInputFormat; @@ -161,6 +165,7 @@ import org.apache.hadoop.hive.ql.stats.StatsPublisher; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; +import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.SerDeUtils; import org.apache.hadoop.hive.serde2.Serializer; @@ -192,6 +197,7 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hadoop.mapred.SequenceFileOutputFormat; +import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Shell; import org.apache.hive.common.util.ReflectionUtil; @@ -199,15 +205,19 @@ import org.slf4j.LoggerFactory; import com.esotericsoftware.kryo.Kryo; +import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** * Utilities. * */ -@SuppressWarnings("nls") +@SuppressWarnings({ "nls", "deprecation" }) public final class Utilities { + // TODO# remove when merging; convert some statements to local loggers, remove others + public static final Logger LOG14535 = LoggerFactory.getLogger("Log14535"); + /** * The object in the reducer are composed of these top level fields. 
*/ @@ -640,14 +650,15 @@ protected Expression instantiate(Object oldInstance, Encoder out) { } @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - Iterator ite = ((Collection) oldInstance).iterator(); + protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { + Iterator ite = ((Collection) oldInstance).iterator(); while (ite.hasNext()) { out.writeStatement(new Statement(oldInstance, "add", new Object[] {ite.next()})); } } } + @VisibleForTesting public static TableDesc defaultTd; static { // by default we expect ^A separated strings @@ -655,7 +666,16 @@ protected void initialize(Class type, Object oldInstance, Object newInstance, En // PlanUtils.getDefaultTableDesc(String separatorCode, String columns) // or getBinarySortableTableDesc(List fieldSchemas) when // we know the column names. - defaultTd = PlanUtils.getDefaultTableDesc("" + Utilities.ctrlaCode); + /** + * Generate the table descriptor of MetadataTypedColumnsetSerDe with the + * separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe + * does not support a table with a single column "col" with type + * "array". 
+ */ + defaultTd = new TableDesc(TextInputFormat.class, IgnoreKeyTextOutputFormat.class, + Utilities.makeProperties(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, + "" + Utilities.ctrlaCode, serdeConstants.SERIALIZATION_LIB, + MetadataTypedColumnsetSerDe.class.getName())); } public static final int carriageReturnCode = 13; @@ -1394,7 +1414,7 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, boolean success, Logger log, DynamicPartitionCtx dpCtx, FileSinkDesc conf, Reporter reporter) throws IOException, HiveException { - + FileSystem fs = specPath.getFileSystem(hconf); Path tmpPath = Utilities.toTempPath(specPath); Path taskTmpPath = Utilities.toTaskTempPath(specPath); @@ -1405,27 +1425,31 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // remove any tmp file or double-committed output files - List emptyBuckets = Utilities.removeTempOrDuplicateFiles(fs, statuses, dpCtx, conf, hconf); + List emptyBuckets = Utilities.removeTempOrDuplicateFiles( + fs, statuses, dpCtx, conf, hconf); perfLogger.PerfLogEnd("FileSinkOperator", "RemoveTempOrDuplicateFiles"); // create empty buckets if necessary if (emptyBuckets.size() > 0) { perfLogger.PerfLogBegin("FileSinkOperator", "CreateEmptyBuckets"); - createEmptyBuckets(hconf, emptyBuckets, conf, reporter); + createEmptyBuckets( + hconf, emptyBuckets, conf.getCompressed(), conf.getTableInfo(), reporter); perfLogger.PerfLogEnd("FileSinkOperator", "CreateEmptyBuckets"); } - // move to the file destination - log.info("Moving tmp dir: " + tmpPath + " to: " + specPath); + Utilities.LOG14535.info("Moving tmp dir: " + tmpPath + " to: " + specPath); perfLogger.PerfLogBegin("FileSinkOperator", "RenameOrMoveFiles"); Utilities.renameOrMoveFiles(fs, tmpPath, specPath); perfLogger.PerfLogEnd("FileSinkOperator", "RenameOrMoveFiles"); } } else { + 
Utilities.LOG14535.info("deleting tmpPath " + tmpPath); fs.delete(tmpPath, true); } + Utilities.LOG14535.info("deleting taskTmpPath " + taskTmpPath); fs.delete(taskTmpPath, true); } + /** * Check the existence of buckets according to bucket specification. Create empty buckets if * needed. @@ -1437,8 +1461,8 @@ public static void mvFileToFinalPath(Path specPath, Configuration hconf, * @throws HiveException * @throws IOException */ - private static void createEmptyBuckets(Configuration hconf, List paths, - FileSinkDesc conf, Reporter reporter) + static void createEmptyBuckets(Configuration hconf, List paths, + boolean isCompressed, TableDesc tableInfo, Reporter reporter) throws HiveException, IOException { JobConf jc; @@ -1450,13 +1474,11 @@ private static void createEmptyBuckets(Configuration hconf, List paths, } HiveOutputFormat hiveOutputFormat = null; Class outputClass = null; - boolean isCompressed = conf.getCompressed(); - TableDesc tableInfo = conf.getTableInfo(); try { Serializer serializer = (Serializer) tableInfo.getDeserializerClass().newInstance(); serializer.initialize(null, tableInfo.getProperties()); outputClass = serializer.getSerializedClass(); - hiveOutputFormat = HiveFileFormatUtils.getHiveOutputFormat(hconf, conf.getTableInfo()); + hiveOutputFormat = HiveFileFormatUtils.getHiveOutputFormat(hconf, tableInfo); } catch (SerDeException e) { throw new HiveException(e); } catch (InstantiationException e) { @@ -1466,6 +1488,7 @@ private static void createEmptyBuckets(Configuration hconf, List paths, } for (Path path : paths) { + Utilities.LOG14535.info("creating empty bucket for " + path); RecordWriter writer = HiveFileFormatUtils.getRecordWriter( jc, hiveOutputFormat, outputClass, isCompressed, tableInfo.getProperties(), path, reporter); @@ -1498,142 +1521,198 @@ public static List removeTempOrDuplicateFiles(FileSystem fs, Path path, */ public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, DynamicPartitionCtx dpCtx, 
FileSinkDesc conf, Configuration hconf) throws IOException { + int dpLevels = dpCtx == null ? 0 : dpCtx.getNumDPCols(), + numBuckets = (conf != null && conf.getTable() != null) + ? conf.getTable().getNumBuckets() : 0; + return removeTempOrDuplicateFiles(fs, fileStats, dpLevels, numBuckets, hconf, null); + } + + private static boolean removeEmptyDpDirectory(FileSystem fs, Path path) throws IOException { + FileStatus[] items = fs.listStatus(path); + // remove empty directory since DP insert should not generate empty partitions. + // empty directories could be generated by crashed Task/ScriptOperator + if (items.length != 0) return false; + if (!fs.delete(path, true)) { + LOG.error("Cannot delete empty directory " + path); + throw new IOException("Cannot delete empty directory " + path); + } + return true; + } + + public static List removeTempOrDuplicateFiles(FileSystem fs, FileStatus[] fileStats, + int dpLevels, int numBuckets, Configuration hconf, Long mmWriteId) throws IOException { if (fileStats == null) { return null; } - List result = new ArrayList(); HashMap taskIDToFile = null; - if (dpCtx != null) { + if (dpLevels > 0) { FileStatus parts[] = fileStats; - for (int i = 0; i < parts.length; ++i) { - assert parts[i].isDir() : "dynamic partition " + parts[i].getPath() + assert parts[i].isDirectory() : "dynamic partition " + parts[i].getPath() + " is not a directory"; - FileStatus[] items = fs.listStatus(parts[i].getPath()); - - // remove empty directory since DP insert should not generate empty partitions. 
- // empty directories could be generated by crashed Task/ScriptOperator - if (items.length == 0) { - if (!fs.delete(parts[i].getPath(), true)) { - LOG.error("Cannot delete empty directory " + parts[i].getPath()); - throw new IOException("Cannot delete empty directory " + parts[i].getPath()); - } + Path path = parts[i].getPath(); + Utilities.LOG14535.info("removeTempOrDuplicateFiles looking at DP " + path); + if (removeEmptyDpDirectory(fs, path)) { + parts[i] = null; + continue; } + FileStatus[] items = fs.listStatus(path); - taskIDToFile = removeTempOrDuplicateFiles(items, fs); - // if the table is bucketed and enforce bucketing, we should check and generate all buckets - if (dpCtx.getNumBuckets() > 0 && taskIDToFile != null && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { - // refresh the file list - items = fs.listStatus(parts[i].getPath()); - // get the missing buckets and generate empty buckets - String taskID1 = taskIDToFile.keySet().iterator().next(); - Path bucketPath = taskIDToFile.values().iterator().next().getPath(); - for (int j = 0; j < dpCtx.getNumBuckets(); ++j) { - String taskID2 = replaceTaskId(taskID1, j); - if (!taskIDToFile.containsKey(taskID2)) { - // create empty bucket, file name should be derived from taskID2 - URI bucketUri = bucketPath.toUri(); - String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j); - result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2)); - } + if (mmWriteId != null) { + Path mmDir = parts[i].getPath(); + if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { + throw new IOException("Unexpected non-MM directory name " + mmDir); } + Utilities.LOG14535.info("removeTempOrDuplicateFiles processing files in MM directory " + mmDir); } + taskIDToFile = removeTempOrDuplicateFilesNonMm(items, fs); + + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. 
+ addBucketFileToResults(taskIDToFile, numBuckets, hconf, result); } } else { FileStatus[] items = fileStats; if (items.length == 0) { return result; } - taskIDToFile = removeTempOrDuplicateFiles(items, fs); - if(taskIDToFile != null && taskIDToFile.size() > 0 && conf != null && conf.getTable() != null - && (conf.getTable().getNumBuckets() > taskIDToFile.size()) && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { - // get the missing buckets and generate empty buckets for non-dynamic partition - String taskID1 = taskIDToFile.keySet().iterator().next(); - Path bucketPath = taskIDToFile.values().iterator().next().getPath(); - for (int j = 0; j < conf.getTable().getNumBuckets(); ++j) { - String taskID2 = replaceTaskId(taskID1, j); - if (!taskIDToFile.containsKey(taskID2)) { - // create empty bucket, file name should be derived from taskID2 - URI bucketUri = bucketPath.toUri(); - String path2 = replaceTaskIdFromFilename(bucketUri.getPath().toString(), j); - result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2)); - } + if (mmWriteId == null) { + taskIDToFile = removeTempOrDuplicateFilesNonMm(items, fs); + } else { + if (items.length > 1) { + throw new IOException("Unexpected directories for non-DP MM: " + Arrays.toString(items)); } + Path mmDir = items[0].getPath(); + if (!mmDir.getName().equals(ValidWriteIds.getMmFilePrefix(mmWriteId))) { + throw new IOException("Unexpected non-MM directory " + mmDir); + } + Utilities.LOG14535.info( + "removeTempOrDuplicateFiles processing files in MM directory " + mmDir); + taskIDToFile = removeTempOrDuplicateFilesNonMm(fs.listStatus(mmDir), fs); } + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. 
+ addBucketFileToResults2(taskIDToFile, numBuckets, hconf, result); } return result; } - public static HashMap removeTempOrDuplicateFiles(FileStatus[] items, - FileSystem fs) throws IOException { + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. + private static void addBucketFileToResults2(HashMap taskIDToFile, + int numBuckets, Configuration hconf, List result) { + if(taskIDToFile != null && taskIDToFile.size() > 0 && (numBuckets > taskIDToFile.size()) + && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + addBucketsToResultsCommon(taskIDToFile, numBuckets, result); + } + } - if (items == null || fs == null) { - return null; + // TODO: not clear why two if conditions are different. Preserve the existing logic for now. + private static void addBucketFileToResults(HashMap taskIDToFile, + int numBuckets, Configuration hconf, List result) { + // if the table is bucketed and enforce bucketing, we should check and generate all buckets + if (numBuckets > 0 && taskIDToFile != null + && !"tez".equalsIgnoreCase(hconf.get(ConfVars.HIVE_EXECUTION_ENGINE.varname))) { + addBucketsToResultsCommon(taskIDToFile, numBuckets, result); + } + } + + private static void addBucketsToResultsCommon( + HashMap taskIDToFile, int numBuckets, List result) { + String taskID1 = taskIDToFile.keySet().iterator().next(); + Path bucketPath = taskIDToFile.values().iterator().next().getPath(); + Utilities.LOG14535.info("Bucket path " + bucketPath); + for (int j = 0; j < numBuckets; ++j) { + addBucketFileIfMissing(result, taskIDToFile, taskID1, bucketPath, j); + } + } + + private static void addBucketFileIfMissing(List result, + HashMap taskIDToFile, String taskID1, Path bucketPath, int j) { + String taskID2 = replaceTaskId(taskID1, j); + if (!taskIDToFile.containsKey(taskID2)) { + // create empty bucket, file name should be derived from taskID2 + URI bucketUri = bucketPath.toUri(); + String path2 = 
replaceTaskIdFromFilename(bucketUri.getPath().toString(), j); + Utilities.LOG14535.info("Creating an empty bucket file " + path2); + result.add(new Path(bucketUri.getScheme(), bucketUri.getAuthority(), path2)); } + } + private static HashMap removeTempOrDuplicateFilesNonMm( + FileStatus[] files, FileSystem fs) throws IOException { + if (files == null || fs == null) { + return null; + } HashMap taskIdToFile = new HashMap(); - for (FileStatus one : items) { + for (FileStatus one : files) { if (isTempPath(one)) { + Utilities.LOG14535.info("removeTempOrDuplicateFiles deleting " + one.getPath()/*, new Exception()*/); if (!fs.delete(one.getPath(), true)) { throw new IOException("Unable to delete tmp file: " + one.getPath()); } } else { - String taskId = getPrefixedTaskIdFromFilename(one.getPath().getName()); - FileStatus otherFile = taskIdToFile.get(taskId); - if (otherFile == null) { - taskIdToFile.put(taskId, one); - } else { - // Compare the file sizes of all the attempt files for the same task, the largest win - // any attempt files could contain partial results (due to task failures or - // speculative runs), but the largest should be the correct one since the result - // of a successful run should never be smaller than a failed/speculative run. - FileStatus toDelete = null; - - // "LOAD .. INTO" and "INSERT INTO" commands will generate files with - // "_copy_x" suffix. These files are usually read by map tasks and the - // task output gets written to some tmp path. The output file names will - // be of format taskId_attemptId. The usual path for all these tasks is - // srcPath -> taskTmpPath -> tmpPath -> finalPath. - // But, MergeFileTask can move files directly from src path to final path - // without copying it to tmp path. In such cases, different files with - // "_copy_x" suffix will be identified as duplicates (change in value - // of x is wrongly identified as attempt id) and will be deleted. 
- // To avoid that we will ignore files with "_copy_x" suffix from duplicate - // elimination. - if (!isCopyFile(one.getPath().getName())) { - if (otherFile.getLen() >= one.getLen()) { - toDelete = one; - } else { - toDelete = otherFile; - taskIdToFile.put(taskId, one); - } - long len1 = toDelete.getLen(); - long len2 = taskIdToFile.get(taskId).getLen(); - if (!fs.delete(toDelete.getPath(), true)) { - throw new IOException( - "Unable to delete duplicate file: " + toDelete.getPath() - + ". Existing file: " + - taskIdToFile.get(taskId).getPath()); - } else { - LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + - " with length " - + len1 + ". Existing file: " + - taskIdToFile.get(taskId).getPath() + " with length " - + len2); - } - } else { - LOG.info(one.getPath() + " file identified as duplicate. This file is" + - " not deleted as it has copySuffix."); - } - } + // This would be a single file. See if we need to remove it. + ponderRemovingTempOrDuplicateFile(fs, one, taskIdToFile); } } return taskIdToFile; } + private static void ponderRemovingTempOrDuplicateFile(FileSystem fs, + FileStatus file, HashMap taskIdToFile) throws IOException { + String taskId = getPrefixedTaskIdFromFilename(file.getPath().getName()); + Utilities.LOG14535.info("removeTempOrDuplicateFiles pondering " + file.getPath() + ", taskId " + taskId); + + FileStatus otherFile = taskIdToFile.get(taskId); + taskIdToFile.put(taskId, (otherFile == null) ? file : + compareTempOrDuplicateFiles(fs, file, otherFile)); + } + + private static FileStatus compareTempOrDuplicateFiles(FileSystem fs, + FileStatus file, FileStatus existingFile) throws IOException { + // Compare the file sizes of all the attempt files for the same task, the largest win + // any attempt files could contain partial results (due to task failures or + // speculative runs), but the largest should be the correct one since the result + // of a successful run should never be smaller than a failed/speculative run. 
+ FileStatus toDelete = null, toRetain = null; + + // "LOAD .. INTO" and "INSERT INTO" commands will generate files with + // "_copy_x" suffix. These files are usually read by map tasks and the + // task output gets written to some tmp path. The output file names will + // be of format taskId_attemptId. The usual path for all these tasks is + // srcPath -> taskTmpPath -> tmpPath -> finalPath. + // But, MergeFileTask can move files directly from src path to final path + // without copying it to tmp path. In such cases, different files with + // "_copy_x" suffix will be identified as duplicates (change in value + // of x is wrongly identified as attempt id) and will be deleted. + // To avoid that we will ignore files with "_copy_x" suffix from duplicate + // elimination. + if (isCopyFile(file.getPath().getName())) { + LOG.info(file.getPath() + " file identified as duplicate. This file is" + + " not deleted as it has copySuffix."); + return existingFile; + } + + if (existingFile.getLen() >= file.getLen()) { + toDelete = file; + toRetain = existingFile; + } else { + toDelete = existingFile; + toRetain = file; + } + if (!fs.delete(toDelete.getPath(), true)) { + throw new IOException( + "Unable to delete duplicate file: " + toDelete.getPath() + + ". Existing file: " + toRetain.getPath()); + } else { + LOG.warn("Duplicate taskid file removed: " + toDelete.getPath() + " with length " + + toDelete.getLen() + ". 
Existing file: " + toRetain.getPath() + " with length " + + toRetain.getLen()); + } + return toRetain; + } + public static boolean isCopyFile(String filename) { String taskId = filename; String copyFileSuffix = null; @@ -2974,8 +3053,9 @@ public static List getInputPaths(JobConf job, MapWork work, Path hiveScrat // The alias may not have any path Path path = null; - for (Path file : new LinkedList(work.getPathToAliases().keySet())) { - List aliases = work.getPathToAliases().get(file); + for (Map.Entry> e : work.getPathToAliases().entrySet()) { + Path file = e.getKey(); + List aliases = e.getValue(); if (aliases.contains(alias)) { path = file; @@ -3719,4 +3799,282 @@ public static String humanReadableByteCount(long bytes) { String suffix = "KMGTPE".charAt(exp-1) + ""; return String.format("%.2f%sB", bytes / Math.pow(unit, exp), suffix); } + + private static final String MANIFEST_EXTENSION = ".manifest"; + + private static void tryDelete(FileSystem fs, Path path) { + try { + fs.delete(path, true); + } catch (IOException ex) { + LOG.error("Failed to delete " + path, ex); + } + } + + public static Path[] getMmDirectoryCandidates(FileSystem fs, Path path, int dpLevels, + int lbLevels, PathFilter filter, long mmWriteId, Configuration conf) throws IOException { + int skipLevels = dpLevels + lbLevels; + if (filter == null) { + filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + } + if (skipLevels == 0) { + return statusToPath(fs.listStatus(path, filter)); + } + if (fs.getScheme().equalsIgnoreCase("s3a") + && HiveConf.getBoolVar(conf, ConfVars.HIVE_MM_AVOID_GLOBSTATUS_ON_S3)) { + return getMmDirectoryCandidatesRecursive(fs, path, skipLevels, filter); + } + return getMmDirectoryCandidatesGlobStatus(fs, path, skipLevels, filter, mmWriteId); + } + + private static Path[] statusToPath(FileStatus[] statuses) { + if (statuses == null) return null; + Path[] paths = new Path[statuses.length]; + for (int i = 0; i < statuses.length; ++i) { + paths[i] = statuses[i].getPath(); 
+ } + return paths; + } + + private static Path[] getMmDirectoryCandidatesRecursive(FileSystem fs, + Path path, int skipLevels, PathFilter filter) throws IOException { + String lastRelDir = null; + HashSet results = new HashSet(); + String relRoot = Path.getPathWithoutSchemeAndAuthority(path).toString(); + if (!relRoot.endsWith(Path.SEPARATOR)) { + relRoot += Path.SEPARATOR; + } + RemoteIterator allFiles = fs.listFiles(path, true); + while (allFiles.hasNext()) { + LocatedFileStatus lfs = allFiles.next(); + Path dirPath = Path.getPathWithoutSchemeAndAuthority(lfs.getPath()); + String dir = dirPath.toString(); + if (!dir.startsWith(relRoot)) { + throw new IOException("Path " + lfs.getPath() + " is not under " + relRoot + + " (when shortened to " + dir + ")"); + } + String subDir = dir.substring(relRoot.length()); + Utilities.LOG14535.info("Looking at " + subDir + " from " + lfs.getPath()); + // If sorted, we'll skip a bunch of files. + if (lastRelDir != null && subDir.startsWith(lastRelDir)) continue; + int startIx = skipLevels > 0 ? 
-1 : 0; + for (int i = 0; i < skipLevels; ++i) { + startIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1); + if (startIx == -1) { + Utilities.LOG14535.info("Expected level of nesting (" + skipLevels + ") is not " + + " present in " + subDir + " (from " + lfs.getPath() + ")"); + break; + } + } + if (startIx == -1) continue; + int endIx = subDir.indexOf(Path.SEPARATOR_CHAR, startIx + 1); + if (endIx == -1) { + Utilities.LOG14535.info("Expected level of nesting (" + (skipLevels + 1) + ") is not " + + " present in " + subDir + " (from " + lfs.getPath() + ")"); + continue; + } + lastRelDir = subDir = subDir.substring(0, endIx); + Path candidate = new Path(relRoot, subDir); + Utilities.LOG14535.info("Considering MM directory candidate " + candidate); + if (!filter.accept(candidate)) continue; + results.add(fs.makeQualified(candidate)); + } + return results.toArray(new Path[results.size()]); + } + + private static Path[] getMmDirectoryCandidatesGlobStatus(FileSystem fs, + Path path, int skipLevels, PathFilter filter, long mmWriteId) throws IOException { + StringBuilder sb = new StringBuilder(path.toUri().getPath()); + for (int i = 0; i < skipLevels; i++) { + sb.append(Path.SEPARATOR).append("*"); + } + sb.append(Path.SEPARATOR).append(ValidWriteIds.getMmFilePrefix(mmWriteId)); + Path pathPattern = new Path(path, sb.toString()); + Utilities.LOG14535.info("Looking for files via: " + pathPattern); + return statusToPath(fs.globStatus(pathPattern, filter)); + } + + private static void tryDeleteAllMmFiles(FileSystem fs, Path specPath, Path manifestDir, + int dpLevels, int lbLevels, String unionSuffix, ValidWriteIds.IdPathFilter filter, + long mmWriteId, Configuration conf) throws IOException { + Path[] files = getMmDirectoryCandidates( + fs, specPath, dpLevels, lbLevels, filter, mmWriteId, conf); + if (files != null) { + for (Path path : files) { + Utilities.LOG14535.info("Deleting " + path + " on failure"); + tryDelete(fs, path); + } + } + Utilities.LOG14535.info("Deleting 
" + manifestDir + " on failure"); + fs.delete(manifestDir, true); + } + + + public static void writeMmCommitManifest(List commitPaths, Path specPath, FileSystem fs, + String taskId, Long mmWriteId, String unionSuffix) throws HiveException { + if (commitPaths.isEmpty()) return; + // We assume one FSOP per task (per specPath), so we create it in specPath. + Path manifestPath = getManifestDir(specPath, mmWriteId, unionSuffix); + manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION); + Utilities.LOG14535.info("Writing manifest to " + manifestPath + " with " + commitPaths); + try { + // Don't overwrite the manifest... should fail if we have collisions. + try (FSDataOutputStream out = fs.create(manifestPath, false)) { + if (out == null) { + throw new HiveException("Failed to create manifest at " + manifestPath); + } + out.writeInt(commitPaths.size()); + for (Path path : commitPaths) { + out.writeUTF(path.toString()); + } + } + } catch (IOException e) { + throw new HiveException(e); + } + } + + private static Path getManifestDir(Path specPath, long mmWriteId, String unionSuffix) { + Path manifestPath = new Path(specPath, "_tmp." + ValidWriteIds.getMmFilePrefix(mmWriteId)); + return (unionSuffix == null) ? 
manifestPath : new Path(manifestPath, unionSuffix); + } + + public static final class MissingBucketsContext { + public final TableDesc tableInfo; + public final int numBuckets; + public final boolean isCompressed; + public MissingBucketsContext(TableDesc tableInfo, int numBuckets, boolean isCompressed) { + this.tableInfo = tableInfo; + this.numBuckets = numBuckets; + this.isCompressed = isCompressed; + } + } + + public static void handleMmTableFinalPath(Path specPath, String unionSuffix, Configuration hconf, + boolean success, int dpLevels, int lbLevels, MissingBucketsContext mbc, long mmWriteId, + Reporter reporter) throws IOException, HiveException { + FileSystem fs = specPath.getFileSystem(hconf); + Path manifestDir = getManifestDir(specPath, mmWriteId, unionSuffix); + if (!success) { + ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + tryDeleteAllMmFiles(fs, specPath, manifestDir, dpLevels, lbLevels, + unionSuffix, filter, mmWriteId, hconf); + return; + } + + Utilities.LOG14535.info("Looking for manifests in: " + manifestDir + " (" + mmWriteId + ")"); + // TODO# may be wrong if there are no splits (empty insert/CTAS) + FileStatus[] manifestFiles = fs.listStatus(manifestDir); + List manifests = new ArrayList<>(); + if (manifestFiles != null) { + for (FileStatus status : manifestFiles) { + Path path = status.getPath(); + if (path.getName().endsWith(MANIFEST_EXTENSION)) { + Utilities.LOG14535.info("Reading manifest " + path); + manifests.add(path); + } + } + } + + Utilities.LOG14535.info("Looking for files in: " + specPath); + ValidWriteIds.IdPathFilter filter = new ValidWriteIds.IdPathFilter(mmWriteId, true); + Path[] files = getMmDirectoryCandidates( + fs, specPath, dpLevels, lbLevels, filter, mmWriteId, hconf); + ArrayList mmDirectories = new ArrayList<>(); + if (files != null) { + for (Path path : files) { + Utilities.LOG14535.info("Looking at path: " + path); + mmDirectories.add(path); + } + } + + HashSet committed = new 
HashSet<>(); + for (Path mfp : manifests) { + try (FSDataInputStream mdis = fs.open(mfp)) { + int fileCount = mdis.readInt(); + for (int i = 0; i < fileCount; ++i) { + String nextFile = mdis.readUTF(); + if (!committed.add(nextFile)) { + throw new HiveException(nextFile + " was specified in multiple manifests"); + } + } + } + } + + Utilities.LOG14535.info("Deleting manifest directory " + manifestDir); + tryDelete(fs, manifestDir); + if (unionSuffix != null) { + // Also delete the parent directory if we are the last union FSOP to execute. + manifestDir = manifestDir.getParent(); + FileStatus[] remainingFiles = fs.listStatus(manifestDir); + if (remainingFiles == null || remainingFiles.length == 0) { + Utilities.LOG14535.info("Deleting manifest directory " + manifestDir); + tryDelete(fs, manifestDir); + } + } + + for (Path path : mmDirectories) { + cleanMmDirectory(path, fs, unionSuffix, committed); + } + + if (!committed.isEmpty()) { + throw new HiveException("The following files were committed but not found: " + committed); + } + + if (mmDirectories.isEmpty()) return; + + // TODO: see HIVE-14886 - removeTempOrDuplicateFiles is broken for list bucketing, + // so maintain parity here by not calling it at all. + if (lbLevels != 0) return; + // Create fake file statuses to avoid querying the file system. removeTempOrDuplicateFiles + // doesn't need tocheck anything except path and directory status for MM directories. + FileStatus[] finalResults = new FileStatus[mmDirectories.size()]; + for (int i = 0; i < mmDirectories.size(); ++i) { + finalResults[i] = new PathOnlyFileStatus(mmDirectories.get(i)); + } + List emptyBuckets = Utilities.removeTempOrDuplicateFiles( + fs, finalResults, dpLevels, mbc == null ? 
0 : mbc.numBuckets, hconf, mmWriteId); + // create empty buckets if necessary + if (emptyBuckets.size() > 0) { + assert mbc != null; + Utilities.createEmptyBuckets(hconf, emptyBuckets, mbc.isCompressed, mbc.tableInfo, reporter); + } + } + + private static final class PathOnlyFileStatus extends FileStatus { + public PathOnlyFileStatus(Path path) { + super(0, true, 0, 0, 0, path); + } + } + + private static void cleanMmDirectory(Path dir, FileSystem fs, + String unionSuffix, HashSet committed) throws IOException, HiveException { + for (FileStatus child : fs.listStatus(dir)) { + Path childPath = child.getPath(); + if (unionSuffix == null) { + if (committed.remove(childPath.toString())) continue; // A good file. + deleteUncommitedFile(childPath, fs); + } else if (!child.isDirectory()) { + if (committed.contains(childPath.toString())) { + throw new HiveException("Union FSOP has commited " + + childPath + " outside of union directory" + unionSuffix); + } + deleteUncommitedFile(childPath, fs); + } else if (childPath.getName().equals(unionSuffix)) { + // Found the right union directory; treat it as "our" MM directory. + cleanMmDirectory(childPath, fs, null, committed); + } else { + Utilities.LOG14535.info("FSOP for " + unionSuffix + + " is ignoring the other side of the union " + childPath.getName()); + } + } + } + + private static void deleteUncommitedFile(Path childPath, FileSystem fs) + throws IOException, HiveException { + Utilities.LOG14535.info("Deleting " + childPath + " that was not committed"); + // We should actually succeed here - if we fail, don't commit the query. 
+ if (!fs.delete(childPath, true)) { + throw new HiveException("Failed to delete an uncommitted path " + childPath); + } + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index f1eba5d21fce..ecbc21630b3b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.metadata.Table; @@ -274,8 +275,9 @@ static long parseBase(Path path) { return result; } + // INSERT_ONLY is a special operation which we only support INSERT operations, no UPDATE/DELETE public enum Operation { - NOT_ACID, INSERT, UPDATE, DELETE; + NOT_ACID, INSERT, UPDATE, DELETE, INSERT_ONLY } /** @@ -343,8 +345,11 @@ public static class AcidOperationalProperties { public static final String SPLIT_UPDATE_STRING = "split_update"; public static final int HASH_BASED_MERGE_BIT = 0x02; public static final String HASH_BASED_MERGE_STRING = "hash_merge"; + public static final int INSERT_ONLY_BIT = 0x03; + public static final String INSERT_ONLY_STRING = "insert_only"; public static final String DEFAULT_VALUE_STRING = TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY; public static final String LEGACY_VALUE_STRING = TransactionalValidationListener.LEGACY_TRANSACTIONAL_PROPERTY; + public static final String INSERTONLY_VALUE_STRING = TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY; private AcidOperationalProperties() { } @@ -372,6 +377,17 @@ public static AcidOperationalProperties getDefault() { return obj; } + /** + * Returns an 
acidOperationalProperties object for tables that uses ACID framework but only + * supports INSERT operation and does not require ORC or bucketing + * @return the acidOperationalProperties object + */ + public static AcidOperationalProperties getInsertOnly() { + AcidOperationalProperties obj = new AcidOperationalProperties(); + obj.setInsertOnly(true); + return obj; + } + /** * Returns an acidOperationalProperties object that is represented by an encoded string. * @param propertiesStr an encoded string representing the acidOperationalProperties. @@ -387,6 +403,9 @@ public static AcidOperationalProperties parseString(String propertiesStr) { if (propertiesStr.equalsIgnoreCase(LEGACY_VALUE_STRING)) { return AcidOperationalProperties.getLegacy(); } + if (propertiesStr.equalsIgnoreCase(INSERTONLY_VALUE_STRING)) { + return AcidOperationalProperties.getInsertOnly(); + } AcidOperationalProperties obj = new AcidOperationalProperties(); String[] options = propertiesStr.split("\\|"); for (String option : options) { @@ -447,6 +466,12 @@ public AcidOperationalProperties setHashBasedMerge(boolean isHashBasedMerge) { return this; } + public AcidOperationalProperties setInsertOnly(boolean isInsertOnly) { + description = (isInsertOnly + ? 
(description | INSERT_ONLY_BIT) : (description & ~INSERT_ONLY_BIT)); + return this; + } + public boolean isSplitUpdate() { return (description & SPLIT_UPDATE_BIT) > 0; } @@ -455,6 +480,10 @@ public boolean isHashBasedMerge() { return (description & HASH_BASED_MERGE_BIT) > 0; } + public boolean isInsertOnly() { + return (description & INSERT_ONLY_BIT) > 0; + } + public int toInt() { return description; } @@ -468,6 +497,9 @@ public String toString() { if (isHashBasedMerge()) { str.append("|" + HASH_BASED_MERGE_STRING); } + if (isInsertOnly()) { + str.append("|" + INSERT_ONLY_STRING); + } return str.toString(); } } @@ -1076,6 +1108,17 @@ public static boolean isAcidTable(Table table) { return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true"); } + /** + * Checks if a table is an ACID table that only supports INSERT, but not UPDATE/DELETE + * @param table table + * @return true if table is an INSERT_ONLY table, false otherwise + */ + public static boolean isInsertOnlyTable(Table table) { + String transactionalProp = table.getProperty(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES); + return transactionalProp != null && + AcidUtils.AcidOperationalProperties.INSERT_ONLY_STRING.equals(transactionalProp); + } + /** * Sets the acidOperationalProperties in the configuration object argument. 
* @param conf Mutable configuration object diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java index e91064b9c75e..59d6142c01e2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.log.PerfLogger; @@ -104,8 +105,10 @@ public Set call() throws Exception { Class inputFormatClass = part.getInputFileFormatClass(); InputFormat inputFormat = getInputFormatFromCache(inputFormatClass, conf); - if (inputFormat instanceof AvoidSplitCombination && - ((AvoidSplitCombination) inputFormat).shouldSkipCombine(paths[i + start], conf)) { + boolean isAvoidSplitCombine = inputFormat instanceof AvoidSplitCombination && + ((AvoidSplitCombination) inputFormat).shouldSkipCombine(paths[i + start], conf); + boolean isMmTable = MetaStoreUtils.isMmTable(part.getTableDesc().getProperties()); + if (isAvoidSplitCombine || isMmTable) { if (LOG.isDebugEnabled()) { LOG.debug("The path [" + paths[i + start] + "] is being parked for HiveInputFormat.getSplits"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index c4b9940c06d2..428093cc8e3b 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -23,10 +23,13 @@ import java.io.DataInput; import java.io.DataOutput; +import java.io.FileNotFoundException; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; +import 
java.util.HashMap; import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -39,8 +42,11 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; @@ -345,7 +351,12 @@ protected void init(JobConf job) { */ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, JobConf conf, InputFormat inputFormat, Class inputFormatClass, int splits, - TableDesc table, List result) throws IOException { + TableDesc table, Map writeIdMap, List result) + throws IOException { + ValidWriteIds writeIds = extractWriteIds(writeIdMap, conf, table.getTableName()); + if (writeIds != null) { + Utilities.LOG14535.info("Observing " + table.getTableName() + ": " + writeIds); + } Utilities.copyTablePropertiesToConf(table, conf); @@ -353,7 +364,11 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job pushFilters(conf, tableScan); } - FileInputFormat.setInputPaths(conf, dirs.toArray(new Path[dirs.size()])); + Path[] finalDirs = processPathsForMmRead(dirs, conf, writeIds); + if (finalDirs == null) { + return; // No valid inputs. 
+ } + FileInputFormat.setInputPaths(conf, finalDirs); conf.setInputFormat(inputFormat.getClass()); int headerCount = 0; @@ -373,6 +388,61 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } } + public static Path[] processPathsForMmRead(List dirs, JobConf conf, + ValidWriteIds writeIds) throws IOException { + if (writeIds == null) { + return dirs.toArray(new Path[dirs.size()]); + } else { + List finalPaths = new ArrayList<>(dirs.size()); + for (Path dir : dirs) { + processForWriteIds(dir, conf, writeIds, finalPaths); + } + if (finalPaths.isEmpty()) { + LOG.warn("No valid inputs found in " + dirs); + return null; + } + return finalPaths.toArray(new Path[finalPaths.size()]); + } + } + + private static void processForWriteIds(Path dir, JobConf conf, + ValidWriteIds writeIds, List finalPaths) throws IOException { + FileSystem fs = dir.getFileSystem(conf); + Utilities.LOG14535.warn("Checking " + dir + " (root) for inputs"); + FileStatus[] files = fs.listStatus(dir); // TODO: batch? 
+ LinkedList subdirs = new LinkedList<>(); + for (FileStatus file : files) { + handleNonMmDirChild(file, writeIds, subdirs, finalPaths); + } + while (!subdirs.isEmpty()) { + Path subdir = subdirs.poll(); + for (FileStatus file : fs.listStatus(subdir)) { + handleNonMmDirChild(file, writeIds, subdirs, finalPaths); + } + } + } + + private static void handleNonMmDirChild(FileStatus file, ValidWriteIds writeIds, + LinkedList subdirs, List finalPaths) { + Path path = file.getPath(); + Utilities.LOG14535.warn("Checking " + path + " for inputs"); + if (!file.isDirectory()) { + Utilities.LOG14535.warn("Ignoring a file not in MM directory " + path); + return; + } + Long writeId = ValidWriteIds.extractWriteId(path); + if (writeId == null) { + subdirs.add(path); + return; + } + if (!writeIds.isValid(writeId)) { + Utilities.LOG14535.warn("Ignoring an uncommitted directory " + path); + return; + } + Utilities.LOG14535.info("Adding input " + path); + finalPaths.add(path); + } + Path[] getInputPaths(JobConf job) throws IOException { Path[] dirs; if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) { @@ -416,6 +486,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { StringBuilder readColumnNamesBuffer = new StringBuilder(newjob. get(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, "")); // for each dir, get the InputFormat, and do getSplits. 
+ Map writeIdMap = new HashMap<>(); for (Path dir : dirs) { PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); Class inputFormatClass = part.getInputFileFormatClass(); @@ -466,7 +537,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { addSplitsForGroup(currentDirs, currentTableScan, newjob, getInputFormatFromCache(currentInputFormatClass, job), currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length), - currentTable, result); + currentTable, writeIdMap, result); } currentDirs.clear(); @@ -488,7 +559,7 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { addSplitsForGroup(currentDirs, currentTableScan, newjob, getInputFormatFromCache(currentInputFormatClass, job), currentInputFormatClass, currentDirs.size()*(numSplits / dirs.length), - currentTable, result); + currentTable, writeIdMap, result); } Utilities.clearWorkMapForConf(job); @@ -499,6 +570,19 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException { return result.toArray(new HiveInputSplit[result.size()]); } + public static ValidWriteIds extractWriteIds(Map writeIdMap, + JobConf newjob, String tableName) { + if (StringUtils.isBlank(tableName)) return null; + ValidWriteIds writeIds = writeIdMap.get(tableName); + if (writeIds == null) { + writeIds = ValidWriteIds.createFromConf(newjob, tableName); + writeIdMap.put(tableName, writeIds != null ? 
writeIds : ValidWriteIds.NO_WRITE_IDS); + } else if (writeIds == ValidWriteIds.NO_WRITE_IDS) { + writeIds = null; + } + return writeIds; + } + private void pushProjection(final JobConf newjob, final StringBuilder readColumnsBuffer, final StringBuilder readColumnNamesBuffer) { String readColIds = readColumnsBuffer.toString(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index dab4c6a10ac0..c7ac452634c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -51,6 +51,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.ConcurrentHashMap; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import javax.jdo.JDODataStoreException; @@ -66,6 +67,7 @@ import org.apache.hadoop.hive.common.HiveStatsUtils; import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; import org.apache.hadoop.hive.conf.HiveConf; @@ -95,6 +97,7 @@ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResult; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; import org.apache.hadoop.hive.metastore.api.HiveObjectRef; import org.apache.hadoop.hive.metastore.api.HiveObjectType; @@ -1496,13 +1499,30 @@ public Database getDatabaseCurrent() throws HiveException { return getDatabase(currentDb); } - public void loadPartition(Path loadPath, String tableName, - Map partSpec, 
boolean replace, - boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException { + public void loadSinglePartition(Path loadPath, String tableName, + Map partSpec, boolean replace, boolean inheritTableSpecs, + boolean isSkewedStoreAsSubdir, boolean isSrcLocal, boolean isAcid, + boolean hasFollowingStatsTask, Long mmWriteId, boolean isCommitMmWrite) + throws HiveException { Table tbl = getTable(tableName); + boolean isMmTableWrite = (mmWriteId != null); + Preconditions.checkState(isMmTableWrite == MetaStoreUtils.isMmTable(tbl.getParameters())); loadPartition(loadPath, tbl, partSpec, replace, inheritTableSpecs, - isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask); + isSkewedStoreAsSubdir, isSrcLocal, isAcid, hasFollowingStatsTask, mmWriteId); + if (isMmTableWrite && isCommitMmWrite) { + // The assumption behind committing here is that this partition is the only one outputted. + commitMmTableWrite(tbl, mmWriteId); + } + } + + + public void commitMmTableWrite(Table tbl, Long mmWriteId) + throws HiveException { + try { + getMSC().finalizeTableWrite(tbl.getDbName(), tbl.getTableName(), mmWriteId, true); + } catch (TException e) { + throw new HiveException(e); + } } /** @@ -1526,11 +1546,10 @@ public void loadPartition(Path loadPath, String tableName, * If the source directory is LOCAL * @param isAcid true if this is an ACID operation */ - public Partition loadPartition(Path loadPath, Table tbl, - Map partSpec, boolean replace, - boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, - boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask) throws HiveException { - + public Partition loadPartition(Path loadPath, Table tbl, Map partSpec, + boolean replace, boolean inheritTableSpecs, boolean isSkewedStoreAsSubdir, + boolean isSrcLocal, boolean isAcid, boolean hasFollowingStatsTask, Long mmWriteId) + throws HiveException { Path tblDataLocationPath = 
tbl.getDataLocation(); try { Partition oldPart = getPartition(tbl, partSpec, false); @@ -1570,16 +1589,40 @@ public Partition loadPartition(Path loadPath, Table tbl, List newFiles = null; PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin("MoveTask", "FileMoves"); - if (replace || (oldPart == null && !isAcid)) { - replaceFiles(tbl.getPath(), loadPath, newPartPath, oldPartPath, getConf(), - isSrcLocal); + // TODO: this assumes both paths are qualified; which they are, currently. + if (mmWriteId != null && loadPath.equals(newPartPath)) { + // MM insert query, move itself is a no-op. + Utilities.LOG14535.info("not moving " + loadPath + " to " + newPartPath + " (MM)"); + assert !isAcid; + if (areEventsForDmlNeeded(tbl, oldPart)) { + newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); + } + Utilities.LOG14535.info("maybe deleting stuff from " + oldPartPath + " (new " + newPartPath + ") for replace"); + if (replace && oldPartPath != null) { + deleteOldPathForReplace(newPartPath, oldPartPath, getConf(), + new ValidWriteIds.IdPathFilter(mmWriteId, false), mmWriteId != null); + } } else { - if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null) { - newFiles = Collections.synchronizedList(new ArrayList()); + // Either a non-MM query, or a load into MM table from an external source. + PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER; + Path destPath = newPartPath; + if (mmWriteId != null) { + // We will load into MM directory, and delete from the parent if needed. + destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId)); + filter = replace ? 
new ValidWriteIds.IdPathFilter(mmWriteId, false) : filter; } + Utilities.LOG14535.info("moving " + loadPath + " to " + destPath); + if (replace || (oldPart == null && !isAcid)) { + replaceFiles(tbl.getPath(), loadPath, destPath, oldPartPath, getConf(), + isSrcLocal, filter, mmWriteId != null); + } else { + if (areEventsForDmlNeeded(tbl, oldPart)) { + newFiles = Collections.synchronizedList(new ArrayList()); + } - FileSystem fs = tbl.getDataLocation().getFileSystem(conf); - Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles); + FileSystem fs = tbl.getDataLocation().getFileSystem(conf); + Hive.copyFiles(conf, loadPath, newPartPath, fs, isSrcLocal, isAcid, newFiles); + } } perfLogger.PerfLogEnd("MoveTask", "FileMoves"); Partition newTPart = oldPart != null ? oldPart : new Partition(tbl, partSpec, newPartPath); @@ -1651,6 +1694,54 @@ public Partition loadPartition(Path loadPath, Table tbl, } } + + private boolean areEventsForDmlNeeded(Table tbl, Partition oldPart) { + return conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary() && oldPart != null; + } + + private List listFilesCreatedByQuery(Path loadPath, long mmWriteId) throws HiveException { + List newFiles = new ArrayList(); + final String filePrefix = ValidWriteIds.getMmFilePrefix(mmWriteId); + FileStatus[] srcs; + FileSystem srcFs; + try { + srcFs = loadPath.getFileSystem(conf); + srcs = srcFs.listStatus(loadPath); + } catch (IOException e) { + LOG.error("Error listing files", e); + throw new HiveException(e); + } + if (srcs == null) { + LOG.info("No sources specified: " + loadPath); + return newFiles; + } + PathFilter subdirFilter = null; + + // TODO: just like the move path, we only do one level of recursion. 
+ for (FileStatus src : srcs) { + if (src.isDirectory()) { + if (subdirFilter == null) { + subdirFilter = new PathFilter() { + @Override + public boolean accept(Path path) { + return path.getName().startsWith(filePrefix); + } + }; + } + try { + for (FileStatus srcFile : srcFs.listStatus(src.getPath(), subdirFilter)) { + newFiles.add(srcFile.getPath()); + } + } catch (IOException e) { + throw new HiveException(e); + } + } else if (src.getPath().getName().startsWith(filePrefix)) { + newFiles.add(src.getPath()); + } + } + return newFiles; + } + private void setStatsPropAndAlterPartition(boolean hasFollowingStatsTask, Table tbl, Partition newTPart) throws MetaException, TException { EnvironmentContext environmentContext = null; @@ -1752,17 +1843,35 @@ private Map, String> constructListBucketingLocationMap(Path newPart * @return Set of valid partitions * @throws HiveException */ - private Set getValidPartitionsInPath(int numDP, Path loadPath) throws HiveException { + private Set getValidPartitionsInPath( + int numDP, int numLB, Path loadPath, Long mmWriteId) throws HiveException { Set validPartitions = new HashSet(); try { FileSystem fs = loadPath.getFileSystem(conf); - FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); - // Check for empty partitions - for (FileStatus s : leafStatus) { - if (!s.isDirectory()) { - throw new HiveException("partition " + s.getPath() + " is not a directory!"); + if (mmWriteId == null) { + FileStatus[] leafStatus = HiveStatsUtils.getFileStatusRecurse(loadPath, numDP, fs); + // Check for empty partitions + for (FileStatus s : leafStatus) { + if (!s.isDirectory()) { + throw new HiveException("partition " + s.getPath() + " is not a directory!"); + } + Path dpPath = s.getPath(); + Utilities.LOG14535.info("Found DP " + dpPath); + validPartitions.add(dpPath); + } + } else { + // The non-MM path only finds new partitions, as it is looking at the temp path. 
+ // To produce the same effect, we will find all the partitions affected by this write ID. + Path[] leafStatus = Utilities.getMmDirectoryCandidates( + fs, loadPath, numDP, numLB, null, mmWriteId, conf); + for (Path p : leafStatus) { + Path dpPath = p.getParent(); // Skip the MM directory that we have found. + for (int i = 0; i < numLB; ++i) { + dpPath = dpPath.getParent(); // Now skip the LB directories, if any... + } + Utilities.LOG14535.info("Found DP " + dpPath); + validPartitions.add(dpPath); } - validPartitions.add(s.getPath()); } } catch (IOException e) { throw new HiveException(e); @@ -1800,8 +1909,8 @@ private Set getValidPartitionsInPath(int numDP, Path loadPath) throws Hive */ public Map, Partition> loadDynamicPartitions(final Path loadPath, final String tableName, final Map partSpec, final boolean replace, - final int numDP, final boolean listBucketingEnabled, final boolean isAcid, final long txnId, - final boolean hasFollowingStatsTask, final AcidUtils.Operation operation) + final int numDP, final int numLB, final boolean isAcid, final long txnId, + final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, final Long mmWriteId) throws HiveException { final Map, Partition> partitionsMap = @@ -1816,7 +1925,7 @@ public Map, Partition> loadDynamicPartitions(final Path load // Get all valid partition paths and existing partitions for them (if any) final Table tbl = getTable(tableName); - final Set validPartitions = getValidPartitionsInPath(numDP, loadPath); + final Set validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, mmWriteId); final int partsToLoad = validPartitions.size(); final AtomicInteger partitionsLoaded = new AtomicInteger(0); @@ -1843,9 +1952,10 @@ public Void call() throws Exception { LOG.info("New loading path = " + partPath + " with partSpec " + fullPartSpec); // load the partition + Utilities.LOG14535.info("loadPartition called for DPP from " + partPath + " to " + tbl.getTableName()); Partition newPartition = 
loadPartition(partPath, tbl, fullPartSpec, - replace, true, listBucketingEnabled, - false, isAcid, hasFollowingStatsTask); + replace, true, numLB > 0, + false, isAcid, hasFollowingStatsTask, mmWriteId); partitionsMap.put(fullPartSpec, newPartition); if (inPlaceEligible) { @@ -1863,7 +1973,7 @@ public Void call() throws Exception { + " table=" + tbl.getTableName() + ", " + " partSpec=" + fullPartSpec + ", " + " replace=" + replace + ", " - + " listBucketingEnabled=" + listBucketingEnabled + ", " + + " listBucketingLevel=" + numLB + ", " + " isAcid=" + isAcid + ", " + " hasFollowingStatsTask=" + hasFollowingStatsTask, t); throw t; @@ -1877,6 +1987,10 @@ public Void call() throws Exception { for (Future future : futures) { future.get(); } + if (mmWriteId != null) { + // Commit after we have processed all the partitions. + commitMmTableWrite(tbl, mmWriteId); + } } catch (InterruptedException | ExecutionException e) { LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); //cancel other futures @@ -1926,8 +2040,8 @@ public Void call() throws Exception { * @param isAcid true if this is an ACID based write */ public void loadTable(Path loadPath, String tableName, boolean replace, boolean isSrcLocal, - boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask) - throws HiveException { + boolean isSkewedStoreAsSubdir, boolean isAcid, boolean hasFollowingStatsTask, + Long mmWriteId) throws HiveException { List newFiles = null; Table tbl = getTable(tableName); @@ -1935,16 +2049,35 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) { newFiles = Collections.synchronizedList(new ArrayList()); } - if (replace) { - Path tableDest = tbl.getPath(); - replaceFiles(tableDest, loadPath, tableDest, tableDest, sessionConf, isSrcLocal); + // TODO: this assumes both paths are qualified; which they are, currently. 
+ if (mmWriteId != null && loadPath.equals(tbl.getPath())) { + Utilities.LOG14535.info("not moving " + loadPath + " to " + tbl.getPath()); + if (replace) { + Path tableDest = tbl.getPath(); + deleteOldPathForReplace(tableDest, tableDest, sessionConf, + new ValidWriteIds.IdPathFilter(mmWriteId, false), mmWriteId != null); + } + newFiles = listFilesCreatedByQuery(loadPath, mmWriteId); } else { - FileSystem fs; - try { - fs = tbl.getDataLocation().getFileSystem(sessionConf); - copyFiles(sessionConf, loadPath, tbl.getPath(), fs, isSrcLocal, isAcid, newFiles); - } catch (IOException e) { - throw new HiveException("addFiles: filesystem error in check phase", e); + // Either a non-MM query, or a load into MM table from an external source. + Path tblPath = tbl.getPath(), destPath = tblPath; + PathFilter filter = FileUtils.HIDDEN_FILES_PATH_FILTER; + if (mmWriteId != null) { + // We will load into MM directory, and delete from the parent if needed. + destPath = new Path(destPath, ValidWriteIds.getMmFilePrefix(mmWriteId)); + filter = replace ? 
new ValidWriteIds.IdPathFilter(mmWriteId, false) : filter; + } + Utilities.LOG14535.info("moving " + loadPath + " to " + tblPath); + if (replace) { + replaceFiles(tblPath, loadPath, destPath, tblPath, + sessionConf, isSrcLocal, filter, mmWriteId != null); + } else { + try { + FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf); + copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcid, newFiles); + } catch (IOException e) { + throw new HiveException("addFiles: filesystem error in check phase", e); + } } } if (!this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { @@ -1979,6 +2112,10 @@ public void loadTable(Path loadPath, String tableName, boolean replace, boolean throw new HiveException(e); } + if (mmWriteId != null) { + commitMmTableWrite(tbl, mmWriteId); + } + fireInsertEvent(tbl, null, newFiles); } @@ -2833,6 +2970,9 @@ private static void copyFiles(final HiveConf conf, final FileSystem destFs, destPath = mvFile(conf, srcP, destPath, isSrcLocal, srcFs, destFs, name, filetype); } + if (inheritPerms) { + HdfsUtils.setFullFileStatus(conf, fullDestStatus, srcGroup, destFs, destPath, false); + } if (null != newFiles) { newFiles.add(destPath); } @@ -3302,7 +3442,7 @@ private static void moveAcidDeltaFiles(String deltaFileType, PathFilter pathFilt * If the source directory is LOCAL */ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, HiveConf conf, - boolean isSrcLocal) throws HiveException { + boolean isSrcLocal, PathFilter deletePathFilter, boolean isMmTable) throws HiveException { try { FileSystem destFs = destf.getFileSystem(conf); @@ -3321,37 +3461,7 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, } if (oldPath != null) { - boolean oldPathDeleted = false; - boolean isOldPathUnderDestf = false; - FileStatus[] statuses = null; - try { - FileSystem oldFs = oldPath.getFileSystem(conf); - statuses = oldFs.listStatus(oldPath, FileUtils.HIDDEN_FILES_PATH_FILTER); - // 
Do not delete oldPath if: - // - destf is subdir of oldPath - isOldPathUnderDestf = isSubDir(oldPath, destf, oldFs, destFs, false); - if (isOldPathUnderDestf) { - // if oldPath is destf or its subdir, its should definitely be deleted, otherwise its - // existing content might result in incorrect (extra) data. - // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is - // not the destf or its subdir? - oldPathDeleted = trashFiles(oldFs, statuses, conf); - } - } catch (IOException e) { - if (isOldPathUnderDestf) { - // if oldPath is a subdir of destf but it could not be cleaned - throw new HiveException("Directory " + oldPath.toString() - + " could not be cleaned up.", e); - } else { - //swallow the exception since it won't affect the final result - LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e); - } - } - if (statuses != null && statuses.length > 0) { - if (isOldPathUnderDestf && !oldPathDeleted) { - throw new HiveException("Destination directory " + destf + " has not be cleaned up."); - } - } + deleteOldPathForReplace(destf, oldPath, conf, deletePathFilter, isMmTable); } // first call FileUtils.mkdir to make sure that destf directory exists, if not, it creates @@ -3386,6 +3496,43 @@ protected void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, } } + private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, + PathFilter pathFilter, boolean isMmTable) throws HiveException { + Utilities.LOG14535.info("Deleting old paths for replace in " + destPath + " and old path " + oldPath); + boolean isOldPathUnderDestf = false; + try { + FileSystem oldFs = oldPath.getFileSystem(conf); + FileSystem destFs = destPath.getFileSystem(conf); + // if oldPath is destf or its subdir, its should definitely be deleted, otherwise its + // existing content might result in incorrect (extra) data. 
+ // But not sure why we changed not to delete the oldPath in HIVE-8750 if it is + // not the destf or its subdir? + isOldPathUnderDestf = isSubDir(oldPath, destPath, oldFs, destFs, false); + if (isOldPathUnderDestf || isMmTable) { + FileStatus[] statuses = oldFs.listStatus(oldPath, pathFilter); + if (statuses == null || statuses.length == 0) return; + String s = "Deleting files under " + oldPath + " for replace: "; + for (FileStatus file : statuses) { + s += file.getPath().getName() + ", "; + } + Utilities.LOG14535.info(s); + if (!trashFiles(oldFs, statuses, conf)) { + throw new HiveException("Destination directory " + destPath + + " has not been cleaned up."); + } + } + } catch (IOException e) { + if (isOldPathUnderDestf || isMmTable) { + // if oldPath is a subdir of destf but it could not be cleaned + throw new HiveException("Directory " + oldPath.toString() + + " could not be cleaned up.", e); + } else { + //swallow the exception since it won't affect the final result + LOG.warn("Directory " + oldPath.toString() + " cannot be cleaned: " + e, e); + } + } + } + /** * Trashes or deletes all files under a directory. Leaves the directory as is. @@ -3951,4 +4098,25 @@ public void addForeignKey(List foreignKeyCols) throw new HiveException(e); } } + + public long getNextTableWriteId(String dbName, String tableName) throws HiveException { + try { + return getMSC().getNextTableWriteId(dbName, tableName); + } catch (Exception e) { + throw new HiveException(e); + } + } + + public ValidWriteIds getValidWriteIdsForTable( + String dbName, String tableName) throws HiveException { + try { + // TODO: decode ID ranges here if we use that optimization + GetValidWriteIdsResult result = getMSC().getValidWriteIds(dbName, tableName); + return new ValidWriteIds(result.getLowWatermarkId(), result.getHighWatermarkId(), + result.isSetAreIdsValid() && result.isAreIdsValid(), + result.isSetIds() ? 
new HashSet(result.getIds()) : null); + } catch (Exception e) { + throw new HiveException(e); + } + } }; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index c0edde9e9231..95a09e2989d7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -366,6 +366,7 @@ public FileStatus[] getSortedPaths() { // TODO: add test case and clean it up @SuppressWarnings("nls") public Path getBucketPath(int bucketNum) { + // Note: this makes assumptions that won't work with MM tables, unions, etc. FileStatus srcs[] = getSortedPaths(); if (srcs == null) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index cea99e1423c0..f0b5738e9aa5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -93,6 +93,7 @@ import org.apache.hadoop.hive.ql.plan.FileMergeDesc; import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc; +import org.apache.hadoop.hive.ql.plan.DependencyCollectionWork; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredLocalWork; @@ -673,7 +674,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set 0, fsInput.getCompilationOpContext()); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { @@ -1346,8 +1358,15 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, cplan.setInputformat("org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"); // NOTE: we should gather stats in MR1 rather than MR2 at merge job since we don't // know if merge MR2 will be triggered at execution time + MoveWork 
dummyMv = null; + if (srcMmWriteId == null) { + // Only create the movework for non-MM table. No action needed for a MM table. + Utilities.LOG14535.info("creating dummy movetask for merge (with lfd)"); + dummyMv = new MoveWork(null, null, null, + new LoadFileDesc(inputDirName, finalName, true, null, null), false); + } ConditionalTask cndTsk = GenMapRedUtils.createCondTask(conf, currTask, dummyMv, work, - fsInputDesc.getFinalDirName().toString()); + fsInputDesc.getMergeInputDirName().toString()); // keep the dynamic partition context in conditional task resolver context ConditionalResolverMergeFilesCtx mrCtx = @@ -1358,7 +1377,12 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, // // 3. add the moveTask as the children of the conditional task // - linkMoveTask(fsOutput, cndTsk, mvTasks, conf, dependencyTask); + // Use the original fsOp path here in case of MM - while the new FSOP merges files inside the + // MM directory, the original MoveTask still commits based on the parent. Note that this path + // can only be triggered for a merge that's part of insert for now; MM tables do not support + // concatenate. Keeping the old logic for non-MM tables with temp directories and stuff. + Path fsopPath = srcMmWriteId != null ? 
fsInputDesc.getFinalDirName() : finalName; + linkMoveTask(fsopPath, cndTsk, mvTasks, conf, dependencyTask); } /** @@ -1371,11 +1395,11 @@ public static void createMRWorkForMergingFiles (FileSinkOperator fsInput, * @param hconf * @param dependencyTask */ - public static void linkMoveTask(FileSinkOperator newOutput, + private static void linkMoveTask(Path fsopPath, ConditionalTask cndTsk, List> mvTasks, HiveConf hconf, DependencyCollectionTask dependencyTask) { - Task mvTask = GenMapRedUtils.findMoveTask(mvTasks, newOutput); + Task mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsopPath); for (Task tsk : cndTsk.getListTasks()) { linkMoveTask(mvTask, tsk, hconf, dependencyTask); @@ -1390,7 +1414,7 @@ public static void linkMoveTask(FileSinkOperator newOutput, * @param hconf * @param dependencyTask */ - public static void linkMoveTask(Task mvTask, + private static void linkMoveTask(Task mvTask, Task task, HiveConf hconf, DependencyCollectionTask dependencyTask) { @@ -1525,10 +1549,11 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, TableScanOperator topOp, FileSinkDesc fsDesc) { ArrayList aliases = new ArrayList(); - Path inputDir = fsDesc.getFinalDirName(); + Path inputDir = fsDesc.getMergeInputDirName(); TableDesc tblDesc = fsDesc.getTableInfo(); aliases.add(inputDir.toString()); // dummy alias: just use the input path + Utilities.LOG14535.info("createMRWorkForMergingFiles for " + inputDir); // constructing the default MapredWork MapredWork cMrPlan = GenMapRedUtils.getMapRedWorkFromConf(conf); MapWork cplan = cMrPlan.getMapWork(); @@ -1553,8 +1578,9 @@ private static MapWork createMRWorkForMergingFiles (HiveConf conf, */ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, boolean hasDynamicPartitions, CompilationOpContext ctx) throws SemanticException { + + Path inputDir = fsInputDesc.getMergeInputDirName(); - Path inputDir = fsInputDesc.getFinalDirName(); TableDesc tblDesc = fsInputDesc.getTableInfo(); 
List inputDirs = new ArrayList(1); @@ -1578,6 +1604,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, + " format other than RCFile or ORCFile"); } + Utilities.LOG14535.info("creating mergefilework from " + inputDirs + " to " + finalName); // create the merge file work MergeFileWork work = new MergeFileWork(inputDirs, finalName, hasDynamicPartitions, tblDesc.getInputFileFormatClass().getName()); @@ -1600,6 +1627,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, } else { fmd = new OrcFileMergeDesc(); } + fmd.setMmWriteId(fsInputDesc.getMmWriteId()); fmd.setDpCtx(fsInputDesc.getDynPartCtx()); fmd.setOutputPath(finalName); fmd.setHasDynamicPartitions(work.hasDynamicPartitions()); @@ -1633,6 +1661,7 @@ public static MapWork createMergeTask(FileSinkDesc fsInputDesc, Path finalName, public static ConditionalTask createCondTask(HiveConf conf, Task currTask, MoveWork mvWork, Serializable mergeWork, String inputPath) { + Utilities.LOG14535.info("Creating conditional merge task for " + inputPath); // There are 3 options for this ConditionalTask: // 1) Merge the partitions @@ -1640,10 +1669,14 @@ public static ConditionalTask createCondTask(HiveConf conf, // 3) Merge some partitions and move other partitions (i.e. merge some partitions and don't // merge others) in this case the merge is done first followed by the move to prevent // conflicts. + // TODO: if we are not dealing with concatenate DDL, we should not create a merge+move path + // because it should be impossible to get incompatible outputs. + // Create a dummy task if no move is needed. + Serializable moveWork = mvWork != null ? 
mvWork : new DependencyCollectionWork(); Task mergeOnlyMergeTask = TaskFactory.get(mergeWork, conf); - Task moveOnlyMoveTask = TaskFactory.get(mvWork, conf); + Task moveOnlyMoveTask = TaskFactory.get(moveWork, conf); Task mergeAndMoveMergeTask = TaskFactory.get(mergeWork, conf); - Task mergeAndMoveMoveTask = TaskFactory.get(mvWork, conf); + Task mergeAndMoveMoveTask = TaskFactory.get(moveWork, conf); // NOTE! It is necessary merge task is the parent of the move task, and not // the other way around, for the proper execution of the execute method of @@ -1651,7 +1684,7 @@ public static ConditionalTask createCondTask(HiveConf conf, mergeAndMoveMergeTask.addDependentTask(mergeAndMoveMoveTask); List listWorks = new ArrayList(); - listWorks.add(mvWork); + listWorks.add(moveWork); listWorks.add(mergeWork); ConditionalWork cndWork = new ConditionalWork(listWorks); @@ -1687,8 +1720,8 @@ public static boolean isSkewedStoredAsDirs(FileSinkDesc fsInputDesc) { .isSkewedStoredAsDir(); } - public static Task findMoveTask( - List> mvTasks, FileSinkOperator fsOp) { + public static Task findMoveTaskForFsopOutput( + List> mvTasks, Path fsopFinalDir) { // find the move task for (Task mvTsk : mvTasks) { MoveWork mvWork = mvTsk.getWork(); @@ -1698,9 +1731,10 @@ public static Task findMoveTask( } else if (mvWork.getLoadTableWork() != null) { srcDir = mvWork.getLoadTableWork().getSourcePath(); } + Utilities.LOG14535.info("Observing MoveWork " + System.identityHashCode(mvWork) + + " with " + srcDir + " while looking for " + fsopFinalDir); - if ((srcDir != null) - && (srcDir.equals(fsOp.getConf().getFinalDirName()))) { + if ((srcDir != null) && srcDir.equals(fsopFinalDir)) { return mvTsk; } } @@ -1720,59 +1754,58 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco Task currTask, boolean isInsertTable) { // Has the user enabled merging of files for map-only jobs or for all jobs - if ((mvTasks != null) && (!mvTasks.isEmpty())) { - - // no need of merging if the move is to a 
local file system - MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTask(mvTasks, fsOp); - - if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) - && !fsOp.getConf().isMaterialization()) { - // mark the MapredWork and FileSinkOperator for gathering stats - fsOp.getConf().setGatherStats(true); - fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); - if (!mvTask.hasFollowingStatsTask()) { - GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf); - } + if (mvTasks == null || mvTasks.isEmpty()) return false; + + // no need of merging if the move is to a local file system + // We are looking based on the original FSOP, so use the original path as is. + MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTaskForFsopOutput( + mvTasks, fsOp.getConf().getFinalDirName()); + + // TODO: this stats-gathering setup does not belong in a merge-requirement check; move it out. + if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + && !fsOp.getConf().isMaterialization()) { + // mark the MapredWork and FileSinkOperator for gathering stats + fsOp.getConf().setGatherStats(true); + fsOp.getConf().setStatsReliable(hconf.getBoolVar(ConfVars.HIVE_STATS_RELIABLE)); + if (!mvTask.hasFollowingStatsTask()) { + GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf); } + } - if ((mvTask != null) && !mvTask.isLocal() && fsOp.getConf().canBeMerged()) { + if (mvTask == null || mvTask.isLocal() || !fsOp.getConf().canBeMerged()) return false; - if (currTask.getWork() instanceof TezWork) { - // tez blurs the boundary between map and reduce, thus it has it's own - // config - return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); - } else if (currTask.getWork() instanceof SparkWork) { - // spark has its own config for merging - return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES); - } + if (currTask.getWork() instanceof TezWork) { + // tez blurs the boundary between map and reduce, thus it has its own config + return
hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); + } else if (currTask.getWork() instanceof SparkWork) { + // spark has its own config for merging + return hconf.getBoolVar(ConfVars.HIVEMERGESPARKFILES); + } + return isMergeRequiredForMr(hconf, fsOp, currTask); + } - if (fsOp.getConf().isLinkedFileSink()) { - // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the - // number of reducers are few, so the number of files anyway are small. - // However, with this optimization, we are increasing the number of files - // possibly by a big margin. So, merge aggresively. - if (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)) { - return true; - } - } else { - // There are separate configuration parameters to control whether to - // merge for a map-only job - // or for a map-reduce job - if (currTask.getWork() instanceof MapredWork) { - ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); - boolean mergeMapOnly = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; - boolean mergeMapRed = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && - reduceWork != null; - if (mergeMapOnly || mergeMapRed) { - return true; - } - } else { - return false; - } - } + private static boolean isMergeRequiredForMr(HiveConf hconf, + FileSinkOperator fsOp, Task currTask) { + if (fsOp.getConf().isLinkedFileSink()) { + // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the + // number of reducers are few, so the number of files anyway are small. + // However, with this optimization, we are increasing the number of files + // possibly by a big margin. So, merge aggresively. 
+ return (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)); + } + // There are separate configuration parameters to control whether to + // merge for a map-only job + // or for a map-reduce job + if (currTask.getWork() instanceof MapredWork) { + ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); + boolean mergeMapOnly = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; + boolean mergeMapRed = + hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && + reduceWork != null; + if (mergeMapOnly || mergeMapRed) { + return true; } } return false; @@ -1796,33 +1829,37 @@ public static Path createMoveTask(Task currTask, boolean Path dest = null; + FileSinkDesc fileSinkDesc = fsOp.getConf(); + boolean isMmTable = fileSinkDesc.isMmTable(); if (chDir) { - FileSinkDesc fileSinkDesc = fsOp.getConf(); - dest = fileSinkDesc.getFinalDirName(); - - // generate the temporary file - // it must be on the same file system as the current destination - Context baseCtx = parseCtx.getContext(); - - // Create the required temporary file in the HDFS location if the destination - // path of the FileSinkOperator table is a blobstore path. - Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); - - // Change all the linked file sink descriptors - if (fileSinkDesc.isLinkedFileSink()) { - for (FileSinkDesc fsConf:fileSinkDesc.getLinkedFileSinkDesc()) { - fsConf.setParentDir(tmpDir); - fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); + + dest = fileSinkDesc.getMergeInputDirName(); + if (!isMmTable) { + // generate the temporary file + // it must be on the same file system as the current destination + Context baseCtx = parseCtx.getContext(); + + // Create the required temporary file in the HDFS location if the destination + // path of the FileSinkOperator table is a blobstore path. 
+ Path tmpDir = baseCtx.getTempDirForPath(fileSinkDesc.getDestPath()); + + // Change all the linked file sink descriptors + if (fileSinkDesc.isLinkedFileSink()) { + for (FileSinkDesc fsConf : fileSinkDesc.getLinkedFileSinkDesc()) { + fsConf.setDirName(new Path(tmpDir, fsConf.getDirName().getName())); + Utilities.LOG14535.info("createMoveTask setting tmpDir for LinkedFileSink chDir " + fsConf.getDirName() + "; dest was " + fileSinkDesc.getDestPath()); + } + } else { + fileSinkDesc.setDirName(tmpDir); + Utilities.LOG14535.info("createMoveTask setting tmpDir chDir " + tmpDir + "; dest was " + fileSinkDesc.getDestPath()); } - } else { - fileSinkDesc.setDirName(tmpDir); } } Task mvTask = null; if (!chDir) { - mvTask = GenMapRedUtils.findMoveTask(mvTasks, fsOp); + mvTask = GenMapRedUtils.findMoveTaskForFsopOutput(mvTasks, fsOp.getConf().getFinalDirName()); } // Set the move task to be dependent on the current task diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java index dd679db70373..2ad1f1cf9ebd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.FilterOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; @@ -190,7 +191,9 @@ public static Path[] prune(Partition part, SampleDesc sampleDescr) String fullScanMsg = ""; // check if input pruning is possible - if (sampleDescr.getInputPruning()) { + // TODO: this relies a lot on having one file per bucket. No support for MM tables for now. 
+ boolean isMmTable = MetaStoreUtils.isMmTable(part.getTable().getParameters()); + if (sampleDescr.getInputPruning() && !isMmTable) { LOG.trace("numerator = " + num); LOG.trace("denominator = " + den); LOG.trace("bucket count = " + bucketCount); @@ -217,7 +220,7 @@ public static Path[] prune(Partition part, SampleDesc sampleDescr) } } else { // need to do full scan - fullScanMsg = "Tablesample not on clustered columns"; + fullScanMsg = isMmTable ? "MM table" : "Tablesample not on clustered columns"; } LOG.warn(fullScanMsg + ", using full table scan"); Path[] ret = part.getPath(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java index 2a7f3d4f037f..3a38a6de7c7a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcFactory.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.ql.exec.OperatorFactory; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.UnionOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -221,7 +222,7 @@ private void pushOperatorsAboveUnion(UnionOperator union, FileSinkDesc fileSinkDesc = (FileSinkDesc) fileSinkOp.getConf().clone(); fileSinkDesc.setDirName(new Path(parentDirName, parent.getIdentifier())); fileSinkDesc.setLinkedFileSink(true); - fileSinkDesc.setParentDir(parentDirName); + Utilities.LOG14535.info("Created LinkedFileSink for union " + fileSinkDesc.getDirName() + "; parent " + parentDirName); parent.setChildOperators(null); Operator tmpFileSinkOp = OperatorFactory.getAndMakeChild(fileSinkDesc, parent.getSchema(), parent); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java index a264c4d059cb..038cbbf8c067 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java @@ -963,6 +963,10 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { if (indexes != null && indexes.size() > 0) { throw new SemanticException(ErrorMsg.TRUNCATE_COLUMN_INDEXED_TABLE.getMsg()); } + // It would be possible to support this, but this is such a pointless command. + if (MetaStoreUtils.isMmTable(table.getParameters())) { + throw new SemanticException("Truncating MM table columns not presently supported"); + } List bucketCols = null; Class inputFormatClass = null; @@ -1061,10 +1065,10 @@ private void analyzeTruncateTable(ASTNode ast) throws SemanticException { Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); truncateTblDesc.setOutputDir(queryTmpdir); LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap() : partSpec); + partSpec == null ? new HashMap() : partSpec, null); ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); + @SuppressWarnings("unchecked") + Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); truncateTask.addDependentTask(moveTsk); // Recalculate the HDFS stats if auto gather stats is set @@ -1585,6 +1589,10 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, try { tblObj = getTable(tableName); + // TODO: we should probably block all ACID tables here. 
+ if (MetaStoreUtils.isMmTable(tblObj.getParameters())) { + throw new SemanticException("Merge is not supported for MM tables"); + } List bucketCols = null; Class inputFormatClass = null; @@ -1672,11 +1680,11 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast, TableDesc tblDesc = Utilities.getTableDesc(tblObj); Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc); mergeDesc.setOutputDir(queryTmpdir); + // No need to handle MM tables - unsupported path. LoadTableDesc ltd = new LoadTableDesc(queryTmpdir, tblDesc, - partSpec == null ? new HashMap() : partSpec); + partSpec == null ? new HashMap() : partSpec, null); ltd.setLbCtx(lbCtx); - Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), - conf); + Task moveTsk = TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf); mergeTask.addDependentTask(moveTsk); if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 167f7a59fe8f..3826d9ff51a9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -74,7 +74,8 @@ private EximUtil() { */ static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); URI uri = new Path(dcPath).toUri(); String scheme = uri.getScheme(); String authority = uri.getAuthority(); @@ -136,7 +137,8 @@ static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws } public static String relativeToAbsolutePath(HiveConf conf, String location) throws SemanticException { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = 
conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); if (testMode) { URI uri = new Path(location).toUri(); String scheme = uri.getScheme(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index fd80e6c3fbca..e1da05c26cb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -305,8 +305,8 @@ public static void removeUnionOperators(GenTezProcContext context, BaseWork work linked.add(desc); desc.setDirName(new Path(path, "" + linked.size())); + Utilities.LOG14535.info("removing union - new desc with " + desc.getDirName() + "; parent " + path); desc.setLinkedFileSink(true); - desc.setParentDir(path); desc.setLinkedFileSinkDesc(linked); } @@ -372,6 +372,7 @@ public static void processFileSink(GenTezProcContext context, FileSinkOperator f // If underlying data is RCFile or OrcFile, RCFileBlockMerge task or // OrcFileStripeMerge task would be created. 
LOG.info("using CombineHiveInputformat for the merge job"); + Utilities.LOG14535.info("will generate MR work for merging files from " + fileSink.getConf().getDirName() + " to " + finalName); GenMapRedUtils.createMRWorkForMergingFiles(fileSink, finalName, context.dependencyTask, context.moveTask, hconf, context.currentTask); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 9986fcf70d99..2a525e70064c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -38,7 +38,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; @@ -48,6 +50,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.ImportCommitWork; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; @@ -121,6 +124,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } // get partition metadata if partition specified if (child.getChildCount() == 2) { + @SuppressWarnings("unused") // TODO: wtf? 
ASTNode partspec = (ASTNode) child.getChild(1); isPartSpecSet = true; parsePartitionSpec(child, parsedPartSpec); @@ -158,9 +162,13 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { // Create table associated with the import // Executed if relevant, and used to contain all the other details about the table if not. - CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname,rv.getTable()); + CreateTableDesc tblDesc = getBaseCreateTableDescFromTable(dbname, rv.getTable()); + boolean isSourceMm = MetaStoreUtils.isMmTable(tblDesc.getTblProps()); - if (isExternalSet){ + if (isExternalSet) { + if (isSourceMm) { + throw new SemanticException("Cannot import an MM table as external"); + } tblDesc.setExternal(isExternalSet); // This condition-check could have been avoided, but to honour the old // default of not calling if it wasn't set, we retain that behaviour. @@ -219,21 +227,32 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { Table table = tableIfExists(tblDesc); if (table != null){ - checkTable(table, tblDesc,replicationSpec); + checkTable(table, tblDesc, replicationSpec); LOG.debug("table " + tblDesc.getTableName() + " exists: metadata checked"); tableExists = true; } + Long mmWriteId = null; + if (table != null && MetaStoreUtils.isMmTable(table.getParameters())) { + mmWriteId = db.getNextTableWriteId(table.getDbName(), table.getTableName()); + } else if (table == null && isSourceMm) { + // We could import everything as is - directories and IDs, but that won't work with ACID + // txn ids in future. So, let's import everything into the new MM directory with ID == 0. 
+ mmWriteId = 0l; + } + if (mmWriteId != null) { + tblDesc.setInitialMmWriteId(mmWriteId); + } if (!replicationSpec.isInReplicationScope()){ createRegularImportTasks( rootTasks, tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table, - fromURI, fs, wh); + fromURI, fs, wh, mmWriteId, isSourceMm); } else { createReplImportTasks( rootTasks, tblDesc, partitionDescs, isPartSpecSet, replicationSpec, table, - fromURI, fs, wh); + fromURI, fs, wh, mmWriteId, isSourceMm); } } catch (SemanticException e) { throw e; @@ -318,44 +337,42 @@ private CreateTableDesc getBaseCreateTableDescFromTable(String dbName, return tblDesc; } - private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath) { + private Task loadTable(URI fromURI, Table table, boolean replace, Path tgtPath, + Long mmWriteId, boolean isSourceMm) { Path dataPath = new Path(fromURI.toString(), "data"); - Path tmpPath = ctx.getExternalTmpPath(tgtPath); - Task copyTask = TaskFactory.get(new CopyWork(dataPath, - tmpPath, false), conf); - LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, - Utilities.getTableDesc(table), new TreeMap(), - replace); - Task loadTableTask = TaskFactory.get(new MoveWork(getInputs(), - getOutputs(), loadTableWork, null, false), conf); + Path destPath = mmWriteId == null ? ctx.getExternalTmpPath(tgtPath) + : new Path(tgtPath, ValidWriteIds.getMmFilePrefix(mmWriteId)); + Utilities.LOG14535.info("adding import work for table with source location: " + + dataPath + "; table: " + tgtPath + "; copy destination " + destPath + "; mm " + + mmWriteId + " (src " + isSourceMm + ") for " + (table == null ? 
"a new table" : table.getTableName())); + + CopyWork cv = new CopyWork(dataPath, destPath, false); + cv.setIsSourceMm(isSourceMm); + LoadTableDesc loadTableWork = new LoadTableDesc(destPath, + Utilities.getTableDesc(table), new TreeMap(), replace, mmWriteId); + MoveWork mv = new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false); + @SuppressWarnings("unchecked") + Task loadTableTask = TaskFactory.get(mv, conf), copyTask = TaskFactory.get(cv, conf); copyTask.addDependentTask(loadTableTask); rootTasks.add(copyTask); return loadTableTask; } + @SuppressWarnings("unchecked") private Task createTableTask(CreateTableDesc tableDesc){ - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - tableDesc - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tableDesc), conf); } + @SuppressWarnings("unchecked") private Task dropTableTask(Table table){ - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - new DropTableDesc(table.getTableName(), null, true, true, null) - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), + new DropTableDesc(table.getTableName(), null, true, true, null)), conf); } + @SuppressWarnings("unchecked") private Task alterTableTask(CreateTableDesc tableDesc) { tableDesc.setReplaceMode(true); - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - tableDesc - ), conf); + return TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tableDesc), conf); } private Task alterSinglePartition( @@ -364,48 +381,54 @@ private Task alterSinglePartition( ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn) { addPartitionDesc.setReplaceMode(true); addPartitionDesc.getPartition(0).setLocation(ptn.getLocation()); // use existing location - return TaskFactory.get(new DDLWork( - getInputs(), - getOutputs(), - addPartitionDesc - ), conf); + @SuppressWarnings("unchecked") + Task r = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), 
addPartitionDesc), conf); + return r; } private Task addSinglePartition(URI fromURI, FileSystem fs, CreateTableDesc tblDesc, - Table table, Warehouse wh, - AddPartitionDesc addPartitionDesc, ReplicationSpec replicationSpec) + Table table, Warehouse wh, AddPartitionDesc addPartitionDesc, + ReplicationSpec replicationSpec, Long mmWriteId, boolean isSourceMm, Task commitTask) throws MetaException, IOException, HiveException { AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0); if (tblDesc.isExternal() && tblDesc.getLocation() == null) { LOG.debug("Importing in-place: adding AddPart for partition " + partSpecToString(partSpec.getPartSpec())); // addPartitionDesc already has the right partition location + @SuppressWarnings("unchecked") Task addPartTask = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), addPartitionDesc), conf); return addPartTask; } else { String srcLocation = partSpec.getLocation(); fixLocationInPartSpec(fs, tblDesc, table, wh, replicationSpec, partSpec); - LOG.debug("adding dependent CopyWork/AddPart/MoveWork for partition " - + partSpecToString(partSpec.getPartSpec()) - + " with source location: " + srcLocation); Path tgtLocation = new Path(partSpec.getLocation()); - Path tmpPath = ctx.getExternalTmpPath(tgtLocation); - Task copyTask = TaskFactory.get(new CopyWork(new Path(srcLocation), - tmpPath, false), conf); - Task addPartTask = TaskFactory.get(new DDLWork(getInputs(), - getOutputs(), addPartitionDesc), conf); - LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath, - Utilities.getTableDesc(table), - partSpec.getPartSpec(), true); + Path destPath = mmWriteId == null ? ctx.getExternalTmpPath(tgtLocation) + : new Path(tgtLocation, ValidWriteIds.getMmFilePrefix(mmWriteId)); + Path moveTaskSrc = mmWriteId == null ? 
destPath : tgtLocation; + Utilities.LOG14535.info("adding import work for partition with source location: " + + srcLocation + "; target: " + tgtLocation + "; copy dest " + destPath + "; mm " + + mmWriteId + " (src " + isSourceMm + ") for " + partSpecToString(partSpec.getPartSpec())); + CopyWork cw = new CopyWork(new Path(srcLocation), destPath, false); + cw.setIsSourceMm(isSourceMm); + DDLWork dw = new DDLWork(getInputs(), getOutputs(), addPartitionDesc); + LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table), + partSpec.getPartSpec(), true, mmWriteId); loadTableWork.setInheritTableSpecs(false); - Task loadPartTask = TaskFactory.get(new MoveWork( - getInputs(), getOutputs(), loadTableWork, null, false), - conf); + // Do not commit the write ID from each task; need to commit once. + // TODO: we should just change the import to use a single MoveTask, like dynparts. + loadTableWork.setIntermediateInMmWrite(mmWriteId != null); + MoveWork mv = new MoveWork(getInputs(), getOutputs(), loadTableWork, null, false); + @SuppressWarnings("unchecked") + Task copyTask = TaskFactory.get(cw, conf), addPartTask = TaskFactory.get(dw, conf), + loadPartTask = TaskFactory.get(mv, conf); copyTask.addDependentTask(loadPartTask); addPartTask.addDependentTask(loadPartTask); rootTasks.add(copyTask); + if (commitTask != null) { + loadPartTask.addDependentTask(commitTask); + } return addPartTask; } } @@ -569,13 +592,11 @@ private void checkTable(Table table, CreateTableDesc tableDesc, ReplicationSpec Class replaced = HiveFileFormatUtils .getOutputFormatSubstitute(origin); if (replaced == null) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); + throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); } importedofc = replaced.getCanonicalName(); } catch(Exception e) { - throw new SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE - .getMsg()); + throw new 
SemanticException(ErrorMsg.INVALID_OUTPUT_FORMAT_TYPE.getMsg()); } if ((!existingifc.equals(importedifc)) || (!existingofc.equals(importedofc))) { @@ -682,43 +703,42 @@ private static String checkParams(Map map1, /** * Create tasks for regular import, no repl complexity */ - private void createRegularImportTasks( - List> rootTasks, - CreateTableDesc tblDesc, - List partitionDescs, - boolean isPartSpecSet, - ReplicationSpec replicationSpec, - Table table, URI fromURI, FileSystem fs, Warehouse wh) + private void createRegularImportTasks(List> rootTasks, + CreateTableDesc tblDesc, List partitionDescs, boolean isPartSpecSet, + ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, + Long mmWriteId, boolean isSourceMm) throws HiveException, URISyntaxException, IOException, MetaException { - if (table != null){ + if (table != null) { if (table.isPartitioned()) { LOG.debug("table partitioned"); + Task ict = createImportCommitTask(table.getDbName(), table.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { Map partSpec = addPartitionDesc.getPartition(0).getPartSpec(); org.apache.hadoop.hive.ql.metadata.Partition ptn = null; if ((ptn = db.getPartition(table, partSpec, false)) == null) { - rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } else { throw new SemanticException( ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec))); } } - } else { LOG.debug("table non-partitioned"); // ensure if destination is not empty only for regular import Path tgtPath = new Path(table.getDataLocation().toString()); FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), conf); checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec); - loadTable(fromURI, table, false, tgtPath); + loadTable(fromURI, table, false, tgtPath, mmWriteId, 
isSourceMm); } // Set this to read because we can't overwrite any existing partitions outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK)); } else { LOG.debug("table " + tblDesc.getTableName() + " does not exist"); + @SuppressWarnings("unchecked") Task t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), tblDesc), conf); table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName()); Database parentDb = db.getDatabase(tblDesc.getDatabaseName()); @@ -728,9 +748,11 @@ private void createRegularImportTasks( outputs.add(new WriteEntity(parentDb, WriteEntity.WriteType.DDL_SHARED)); if (isPartitioned(tblDesc)) { + Task ict = createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - t.addDependentTask( - addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } } else { LOG.debug("adding dependent CopyWork/MoveWork for table"); @@ -747,24 +769,30 @@ private void createRegularImportTasks( } FileSystem tgtFs = FileSystem.get(tablePath.toUri(), conf); checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec); - t.addDependentTask(loadTable(fromURI, table, false, tablePath)); + t.addDependentTask(loadTable(fromURI, table, false, tablePath, mmWriteId, isSourceMm)); } } rootTasks.add(t); } } + private Task createImportCommitTask(String dbName, String tblName, Long mmWriteId) { + @SuppressWarnings("unchecked") + Task ict = (mmWriteId == null) ? 
null : TaskFactory.get( + new ImportCommitWork(dbName, tblName, mmWriteId), conf); + return ict; + } + /** * Create tasks for repl import */ - private void createReplImportTasks( - List> rootTasks, - CreateTableDesc tblDesc, - List partitionDescs, - boolean isPartSpecSet, ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh) + private void createReplImportTasks(List> rootTasks, + CreateTableDesc tblDesc, List partitionDescs, boolean isPartSpecSet, + ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, + Long mmWriteId, boolean isSourceMm) throws HiveException, URISyntaxException, IOException, MetaException { - Task dr = null; + Task dr = null; WriteEntity.WriteType lockType = WriteEntity.WriteType.DDL_NO_LOCK; if ((table != null) && (isPartitioned(tblDesc) != table.isPartitioned())){ @@ -807,18 +835,21 @@ private void createReplImportTasks( lockType = WriteEntity.WriteType.DDL_SHARED; } - Task t = createTableTask(tblDesc); + Task t = createTableTask(tblDesc); table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName()); if (!replicationSpec.isMetadataOnly()) { if (isPartitioned(tblDesc)) { + Task ict = createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - t.addDependentTask( - addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, mmWriteId, isSourceMm, ict)); } } else { LOG.debug("adding dependent CopyWork/MoveWork for table"); - t.addDependentTask(loadTable(fromURI, table, true, new Path(tblDesc.getLocation()))); + t.addDependentTask(loadTable( + fromURI, table, true, new Path(tblDesc.getLocation()), mmWriteId, isSourceMm)); } } if (dr == null){ @@ -834,22 +865,25 @@ private void createReplImportTasks( if (table.isPartitioned()) { LOG.debug("table 
partitioned"); for (AddPartitionDesc addPartitionDesc : partitionDescs) { - Map partSpec = addPartitionDesc.getPartition(0).getPartSpec(); org.apache.hadoop.hive.ql.metadata.Partition ptn = null; - + Task ict = replicationSpec.isMetadataOnly() ? null : createImportCommitTask( + tblDesc.getDatabaseName(), tblDesc.getTableName(), mmWriteId); if ((ptn = db.getPartition(table, partSpec, false)) == null) { if (!replicationSpec.isMetadataOnly()){ - rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, + replicationSpec, mmWriteId, isSourceMm, ict)); } } else { // If replicating, then the partition already existing means we need to replace, maybe, if // the destination ptn's repl.last.id is older than the replacement's. if (replicationSpec.allowReplacementInto(ptn)){ if (!replicationSpec.isMetadataOnly()){ - rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec)); + rootTasks.add(addSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, mmWriteId, isSourceMm, ict)); } else { - rootTasks.add(alterSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, ptn)); + rootTasks.add(alterSinglePartition(fromURI, fs, tblDesc, table, wh, + addPartitionDesc, replicationSpec, ptn)); } if (lockType == WriteEntity.WriteType.DDL_NO_LOCK){ lockType = WriteEntity.WriteType.DDL_SHARED; @@ -873,7 +907,8 @@ private void createReplImportTasks( return; // silently return, table is newer than our replacement. 
} if (!replicationSpec.isMetadataOnly()) { - loadTable(fromURI, table, true, new Path(fromURI)); // repl-imports are replace-into + // repl-imports are replace-into + loadTable(fromURI, table, true, new Path(fromURI), mmWriteId, isSourceMm); } else { rootTasks.add(alterTableTask(tblDesc)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index a7005f137080..26274f56b22e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -46,6 +47,7 @@ import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.StatsWork; @@ -259,10 +261,19 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { } } + Long mmWriteId = null; + Table tbl = ts.tableHandle; + if (MetaStoreUtils.isMmTable(tbl.getParameters())) { + try { + mmWriteId = db.getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); + } catch (HiveException e) { + throw new SemanticException(e); + } + } LoadTableDesc loadTableWork; loadTableWork = new LoadTableDesc(new Path(fromURI), - Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite); + Utilities.getTableDesc(ts.tableHandle), partSpec, isOverWrite, mmWriteId); if (preservePartitionSpecs){ // Note : 
preservePartitionSpecs=true implies inheritTableSpecs=false but // but preservePartitionSpecs=false(default) here is not sufficient enough diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java index 35f34daad7e9..9d49cfd6ceb0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java @@ -333,14 +333,6 @@ public List getLoadTableWork() { return loadTableWork; } - /** - * @param loadTableWork - * the loadTableWork to set - */ - public void setLoadTableWork(List loadTableWork) { - this.loadTableWork = loadTableWork; - } - /** * @return the loadFileWork */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 17dfd0308de3..36c904950aa6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.SQLForeignKey; import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.CompilationOpContext; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryProperties; @@ -243,6 +244,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { + public static final String DUMMY_DATABASE = "_dummy_database"; public static final String DUMMY_TABLE = "_dummy_table"; public static final String SUBQUERY_TAG_1 = "-subquery1"; @@ -265,7 +267,6 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { private final Map joinContext; private final Map smbMapJoinContext; private final HashMap topToTable; - private final Map fsopToTable; private final List 
reduceSinkOperatorsAddedByEnforceBucketingSorting; private final HashMap> topToTableProps; private QB qb; @@ -365,7 +366,6 @@ public SemanticAnalyzer(QueryState queryState) throws SemanticException { smbMapJoinContext = new HashMap(); // Must be deterministic order map for consistent q-test output across Java versions topToTable = new LinkedHashMap(); - fsopToTable = new HashMap(); reduceSinkOperatorsAddedByEnforceBucketingSorting = new ArrayList(); topToTableProps = new HashMap>(); destTableId = 1; @@ -424,7 +424,6 @@ protected void reset(boolean clearPartsCache) { opToPartToSkewedPruner.clear(); opToSamplePruner.clear(); nameToSplitSample.clear(); - fsopToTable.clear(); resultSchema = null; createVwDesc = null; viewsExpanded = null; @@ -6472,7 +6471,8 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, nullOrder.append(sortOrder == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC ? 'a' : 'z'); } input = genReduceSinkPlan(input, partnCols, sortCols, order.toString(), nullOrder.toString(), - maxReducers, (AcidUtils.isAcidTable(dest_tab) ? getAcidType() : AcidUtils.Operation.NOT_ACID)); + maxReducers, (AcidUtils.isAcidTable(dest_tab) ? 
+ getAcidType(dest_tab, table_desc.getOutputFileFormatClass()) : AcidUtils.Operation.NOT_ACID)); reduceSinkOperatorsAddedByEnforceBucketingSorting.add((ReduceSinkOperator)input.getParentOperators().get(0)); ctx.setMultiFileSpray(multiFileSpray); ctx.setNumFiles(numFiles); @@ -6550,6 +6550,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) LoadTableDesc ltd = null; ListBucketingCtx lbCtx = null; Map partSpec = null; + boolean isMmTable = false, isMmCtas = false; + Long mmWriteId = null; switch (dest_type.intValue()) { case QBMetaData.DEST_TABLE: { @@ -6559,70 +6561,27 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) destTableIsTemporary = dest_tab.isTemporary(); // Is the user trying to insert into a external tables - if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && - (dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE))) { - throw new SemanticException( - ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); - } + checkExternalTable(dest_tab); partSpec = qbm.getPartSpecForAlias(dest); dest_path = dest_tab.getPath(); - // If the query here is an INSERT_INTO and the target is an immutable table, - // verify that our destination is empty before proceeding - if (dest_tab.isImmutable() && - qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){ - try { - FileSystem fs = dest_path.getFileSystem(conf); - if (! 
MetaStoreUtils.isDirEmpty(fs,dest_path)){ - LOG.warn("Attempted write into an immutable table : " - + dest_tab.getTableName() + " : " + dest_path); - throw new SemanticException( - ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); - } - } catch (IOException ioe) { - LOG.warn("Error while trying to determine if immutable table has any data : " - + dest_tab.getTableName() + " : " + dest_path); - throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); - } - } - - // check for partition - List parts = dest_tab.getPartitionKeys(); - if (parts != null && parts.size() > 0) { // table is partitioned - if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition - throw new SemanticException(generateErrorMessage( - qb.getParseInfo().getDestForClause(dest), - ErrorMsg.NEED_PARTITION_ERROR.getMsg())); - } - dpCtx = qbm.getDPCtx(dest); - if (dpCtx == null) { - dest_tab.validatePartColumnNames(partSpec, false); - dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); - qbm.setDPCtx(dest, dpCtx); - } + checkImmutableTable(qb, dest_tab, dest_path, false); - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP - throw new SemanticException(generateErrorMessage( - qb.getParseInfo().getDestForClause(dest), - ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); - } - if (dpCtx.getSPPath() != null) { - dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath()); - } - if ((dest_tab.getNumBuckets() > 0)) { - dpCtx.setNumBuckets(dest_tab.getNumBuckets()); - } + // Check for dynamic partitions. 
+ dpCtx = checkDynPart(qb, qbm, dest_tab, partSpec, dest); + if (dpCtx != null && dpCtx.getSPPath() != null) { + dest_path = new Path(dest_tab.getPath(), dpCtx.getSPPath()); } boolean isNonNativeTable = dest_tab.isNonNative(); - if (isNonNativeTable) { + isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters()); + if (isNonNativeTable || isMmTable) { queryTmpdir = dest_path; } else { queryTmpdir = ctx.getTempDirForPath(dest_path); } + Utilities.LOG14535.info("create filesink w/DEST_TABLE specifying " + queryTmpdir + " from " + dest_path); if (dpCtx != null) { // set the root of the temporary path where dynamic partition columns will populate dpCtx.setRootPath(queryTmpdir); @@ -6646,12 +6605,17 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) if (!isNonNativeTable) { AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - checkAcidConstraints(qb, table_desc, dest_tab); + acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass()); + checkAcidConstraints(qb, table_desc, dest_tab, acidOp); + } + try { + mmWriteId = getMmWriteId(dest_tab, isMmTable); + } catch (HiveException e) { + throw new SemanticException(e); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp); - ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), - dest_tab.getTableName())); + boolean isReplace = !qb.getParseInfo().isInsertIntoTable( + dest_tab.getDbName(), dest_tab.getTableName()); + ltd = new LoadTableDesc(queryTmpdir, table_desc, dpCtx, acidOp, isReplace, mmWriteId); ltd.setLbCtx(lbCtx); loadTableWork.add(ltd); } else { @@ -6660,43 +6624,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) setStatsForNonNativeTable(dest_tab); } - WriteEntity output = null; - - // Here only register the whole table for post-exec hook if no DP present - // in the case of DP, we will register WriteEntity in MoveTask when the 
- // list of dynamically created partitions are known. - if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) { - output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable)); - if (!outputs.add(output)) { - throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES - .getMsg(dest_tab.getTableName())); - } - } - if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) { - // No static partition specified - if (dpCtx.getNumSPCols() == 0) { - output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable), false); - outputs.add(output); - } - // part of the partition specified - // Create a DummyPartition in this case. Since, the metastore does not store partial - // partitions currently, we need to store dummy partitions - else { - try { - String ppath = dpCtx.getSPPath(); - ppath = ppath.substring(0, ppath.length() - 1); - DummyPartition p = - new DummyPartition(dest_tab, dest_tab.getDbName() - + "@" + dest_tab.getTableName() + "@" + ppath, - partSpec); - output = new WriteEntity(p, getWriteType(), false); - outputs.add(output); - } catch (HiveException e) { - throw new SemanticException(e.getMessage(), e); - } - } - } - + WriteEntity output = generateTableWriteEntity( + dest_tab, partSpec, ltd, dpCtx, isNonNativeTable); ctx.getLoadTableOutputMap().put(ltd, output); break; } @@ -6705,40 +6634,22 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part = qbm.getDestPartitionForAlias(dest); dest_tab = dest_part.getTable(); destTableIsAcid = AcidUtils.isAcidTable(dest_tab); - if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && - dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE)) { - throw new SemanticException( - ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); - } + + checkExternalTable(dest_tab); Path tabPath = dest_tab.getPath(); Path partPath = dest_part.getDataLocation(); - // If the query here is an INSERT_INTO and the target is an immutable table, - // 
verify that our destination is empty before proceeding - if (dest_tab.isImmutable() && - qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(),dest_tab.getTableName())){ - try { - FileSystem fs = partPath.getFileSystem(conf); - if (! MetaStoreUtils.isDirEmpty(fs,partPath)){ - LOG.warn("Attempted write into an immutable table partition : " - + dest_tab.getTableName() + " : " + partPath); - throw new SemanticException( - ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); - } - } catch (IOException ioe) { - LOG.warn("Error while trying to determine if immutable table partition has any data : " - + dest_tab.getTableName() + " : " + partPath); - throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); - } - } + checkImmutableTable(qb, dest_tab, partPath, true); // if the table is in a different dfs than the partition, // replace the partition's dfs with the table's dfs. dest_path = new Path(tabPath.toUri().getScheme(), tabPath.toUri() .getAuthority(), partPath.toUri().getPath()); - queryTmpdir = ctx.getTempDirForPath(dest_path); + isMmTable = MetaStoreUtils.isMmTable(dest_tab.getParameters()); + queryTmpdir = isMmTable ? 
dest_path : ctx.getTempDirForPath(dest_path); + Utilities.LOG14535.info("create filesink w/DEST_PARTITION specifying " + queryTmpdir + " from " + dest_path); table_desc = Utilities.getTableDesc(dest_tab); // Add sorting/bucketing if needed @@ -6753,10 +6664,16 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) dest_part.isStoredAsSubDirectories(), conf); AcidUtils.Operation acidOp = AcidUtils.Operation.NOT_ACID; if (destTableIsAcid) { - acidOp = getAcidType(table_desc.getOutputFileFormatClass()); - checkAcidConstraints(qb, table_desc, dest_tab); + acidOp = getAcidType(dest_tab, table_desc.getOutputFileFormatClass()); + checkAcidConstraints(qb, table_desc, dest_tab, acidOp); } - ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp); + try { + mmWriteId = getMmWriteId(dest_tab, isMmTable); + } catch (HiveException e) { + // How is this a semantic exception? Stupid Java and signatures. + throw new SemanticException(e); + } + ltd = new LoadTableDesc(queryTmpdir, table_desc, dest_part.getSpec(), acidOp, mmWriteId); ltd.setReplace(!qb.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); ltd.setLbCtx(lbCtx); @@ -6776,26 +6693,6 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) case QBMetaData.DEST_DFS_FILE: { dest_path = new Path(qbm.getDestFileForAlias(dest)); - if (isLocal) { - // for local directory - we always write to map-red intermediate - // store and then copy to local fs - queryTmpdir = ctx.getMRTmpPath(); - } else { - // otherwise write to the file system implied by the directory - // no copy is required. 
we may want to revisit this policy in future - - try { - Path qPath = FileUtils.makeQualified(dest_path, conf); - queryTmpdir = ctx.getTempDirForPath(qPath); - } catch (Exception e) { - throw new SemanticException("Error creating temporary folder on: " - + dest_path, e); - } - } - String cols = ""; - String colTypes = ""; - ArrayList colInfos = inputRR.getColumnInfos(); - // CTAS case: the file output format and serde are defined by the create // table command rather than taking the default value List field_schemas = null; @@ -6805,64 +6702,39 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) field_schemas = new ArrayList(); destTableIsTemporary = tblDesc.isTemporary(); destTableIsMaterialization = tblDesc.isMaterialization(); + if (MetaStoreUtils.isMmTable(tblDesc.getTblProps())) { + isMmTable = isMmCtas = true; + // TODO# this should really get current ACID txn; assuming ACID works correctly the txn + // should have been opened to create the ACID table. For now use the first ID. 
+ mmWriteId = 0l; + tblDesc.setInitialMmWriteId(mmWriteId); + } } else if (viewDesc != null) { field_schemas = new ArrayList(); destTableIsTemporary = false; } - boolean first = true; - for (ColumnInfo colInfo : colInfos) { - String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); - - if (nm[1] != null) { // non-null column alias - colInfo.setAlias(nm[1]); - } - - String colName = colInfo.getInternalName(); //default column name - if (field_schemas != null) { - FieldSchema col = new FieldSchema(); - if (!("".equals(nm[0])) && nm[1] != null) { - colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` - } - colName = fixCtasColumnName(colName); - col.setName(colName); - String typeName = colInfo.getType().getTypeName(); - // CTAS should NOT create a VOID type - if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) { - throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE - .getMsg(colName)); - } - col.setType(typeName); - field_schemas.add(col); - } - - if (!first) { - cols = cols.concat(","); - colTypes = colTypes.concat(":"); - } - - first = false; - cols = cols.concat(colName); - - // Replace VOID type with string when the output is a temp table or - // local files. - // A VOID type can be generated under the query: - // - // select NULL from tt; - // or - // insert overwrite local directory "abc" select NULL from tt; - // - // where there is no column type to which the NULL value should be - // converted. - // - String tName = colInfo.getType().getTypeName(); - if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { - colTypes = colTypes.concat(serdeConstants.STRING_TYPE_NAME); - } else { - colTypes = colTypes.concat(tName); + if (isLocal) { + assert !isMmTable; + // for local directory - we always write to map-red intermediate + // store and then copy to local fs + queryTmpdir = ctx.getMRTmpPath(); + } else { + // otherwise write to the file system implied by the directory + // no copy is required. 
we may want to revisit this policy in future + try { + Path qPath = FileUtils.makeQualified(dest_path, conf); + queryTmpdir = isMmTable ? qPath : ctx.getTempDirForPath(qPath); + Utilities.LOG14535.info("Setting query directory " + queryTmpdir + " from " + dest_path + " (" + isMmTable + ")"); + } catch (Exception e) { + throw new SemanticException("Error creating temporary folder on: " + + dest_path, e); } } + ColsAndTypes ct = deriveFileSinkColTypes(inputRR, field_schemas); + String cols = ct.cols, colTypes = ct.colTypes; + // update the create table descriptor with the resulting schema. if (tblDesc != null) { tblDesc.setCols(new ArrayList(field_schemas)); @@ -6879,8 +6751,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE); - loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, queryTmpdir, dest_path, isDfsDir, cols, - colTypes)); + // Create LFD even for MM CTAS - it's a no-op move, but it still seems to be uses for stats. + loadFileWork.add(new LoadFileDesc(tblDesc, viewDesc, + queryTmpdir, dest_path, isDfsDir, cols, colTypes)); if (tblDesc == null) { if (viewDesc != null) { @@ -6961,6 +6834,116 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) genPartnCols(dest, input, qb, table_desc, dest_tab, rsCtx); } + assert isMmTable == (mmWriteId != null); + FileSinkDesc fileSinkDesc = createFileSinkDesc(table_desc, dest_part, + dest_path, currentTableId, destTableIsAcid, destTableIsTemporary, + destTableIsMaterialization, queryTmpdir, rsCtx, dpCtx, lbCtx, fsRS, + canBeMerged, mmWriteId); + if (isMmCtas) { + // Add FSD so that the LoadTask compilation could fix up its path to avoid the move. 
+ tableDesc.setWriter(fileSinkDesc); + } + + Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( + fileSinkDesc, fsRS, input), inputRR); + + handleLineage(ltd, output); + + if (LOG.isDebugEnabled()) { + LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " + + dest_path + " row schema: " + inputRR.toString()); + } + + FileSinkOperator fso = (FileSinkOperator) output; + fso.getConf().setTable(dest_tab); + // the following code is used to collect column stats when + // hive.stats.autogather=true + // and it is an insert overwrite or insert into table + if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) + && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { + if (dest_type.intValue() == QBMetaData.DEST_TABLE) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() + .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { + genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb + .getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + + } + } + return output; + } + + private ColsAndTypes deriveFileSinkColTypes( + RowResolver inputRR, List field_schemas) throws SemanticException { + ColsAndTypes result = new ColsAndTypes("", ""); + ArrayList colInfos = inputRR.getColumnInfos(); + boolean first = true; + for (ColumnInfo colInfo : colInfos) { + String[] nm = inputRR.reverseLookup(colInfo.getInternalName()); + + if (nm[1] != null) { // non-null column alias + colInfo.setAlias(nm[1]); + } + + String colName = colInfo.getInternalName(); //default column name + if (field_schemas != null) { + FieldSchema col = new FieldSchema(); + if (!("".equals(nm[0])) && nm[1] != null) { + colName = unescapeIdentifier(colInfo.getAlias()).toLowerCase(); // remove `` + } + colName = fixCtasColumnName(colName); + 
col.setName(colName); + String typeName = colInfo.getType().getTypeName(); + // CTAS should NOT create a VOID type + if (typeName.equals(serdeConstants.VOID_TYPE_NAME)) { + throw new SemanticException(ErrorMsg.CTAS_CREATES_VOID_TYPE.getMsg(colName)); + } + col.setType(typeName); + field_schemas.add(col); + } + + if (!first) { + result.cols = result.cols.concat(","); + result.colTypes = result.colTypes.concat(":"); + } + + first = false; + result.cols = result.cols.concat(colName); + + // Replace VOID type with string when the output is a temp table or + // local files. + // A VOID type can be generated under the query: + // + // select NULL from tt; + // or + // insert overwrite local directory "abc" select NULL from tt; + // + // where there is no column type to which the NULL value should be + // converted. + // + String tName = colInfo.getType().getTypeName(); + if (tName.equals(serdeConstants.VOID_TYPE_NAME)) { + result.colTypes = result.colTypes.concat(serdeConstants.STRING_TYPE_NAME); + } else { + result.colTypes = result.colTypes.concat(tName); + } + } + return result; + } + + private static Long getMmWriteId(Table tbl, boolean isMmTable) throws HiveException { + if (!isMmTable) return null; + // Get the next write ID for this table. We will prefix files with this write ID. 
+ return Hive.get().getNextTableWriteId(tbl.getDbName(), tbl.getTableName()); + } + + private FileSinkDesc createFileSinkDesc(TableDesc table_desc, + Partition dest_part, Path dest_path, int currentTableId, + boolean destTableIsAcid, boolean destTableIsTemporary, + boolean destTableIsMaterialization, Path queryTmpdir, + SortBucketRSCtx rsCtx, DynamicPartitionCtx dpCtx, ListBucketingCtx lbCtx, + RowSchema fsRS, boolean canBeMerged, Long mmWriteId) throws SemanticException { FileSinkDesc fileSinkDesc = new FileSinkDesc( queryTmpdir, table_desc, @@ -6972,12 +6955,13 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) rsCtx.getTotalFiles(), rsCtx.getPartnCols(), dpCtx, - dest_path); + dest_path, + mmWriteId); fileSinkDesc.setHiveServerQuery(SessionState.get().isHiveServerQuery()); // If this is an insert, update, or delete on an ACID table then mark that so the // FileSinkOperator knows how to properly write to it. - if (destTableIsAcid) { + if (destTableIsAcid && !AcidUtils.isInsertOnlyTable(dest_part.getTable())) { AcidUtils.Operation wt = updating() ? AcidUtils.Operation.UPDATE : (deleting() ? 
AcidUtils.Operation.DELETE : AcidUtils.Operation.INSERT); fileSinkDesc.setWriteType(wt); @@ -7016,10 +7000,11 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } else if (dpCtx != null) { fileSinkDesc.setStaticSpec(dpCtx.getSPPath()); } + return fileSinkDesc; + } - Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - fileSinkDesc, fsRS, input), inputRR); - + private void handleLineage(LoadTableDesc ltd, Operator output) + throws SemanticException { if (ltd != null && SessionState.get() != null) { SessionState.get().getLineageState() .mapDirToFop(ltd.getSourcePath(), (FileSinkOperator) output); @@ -7037,33 +7022,111 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) SessionState.get().getLineageState() .mapDirToFop(tlocation, (FileSinkOperator) output); } + } - if (LOG.isDebugEnabled()) { - LOG.debug("Created FileSink Plan for clause: " + dest + "dest_path: " - + dest_path + " row schema: " + inputRR.toString()); + private WriteEntity generateTableWriteEntity(Table dest_tab, + Map partSpec, LoadTableDesc ltd, + DynamicPartitionCtx dpCtx, boolean isNonNativeTable) + throws SemanticException { + WriteEntity output = null; + + // Here only register the whole table for post-exec hook if no DP present + // in the case of DP, we will register WriteEntity in MoveTask when the + // list of dynamically created partitions are known. 
+ if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) { + output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable)); + if (!outputs.add(output)) { + throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES + .getMsg(dest_tab.getTableName())); + } + } + if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) { + // No static partition specified + if (dpCtx.getNumSPCols() == 0) { + output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable), false); + outputs.add(output); + } + // part of the partition specified + // Create a DummyPartition in this case. Since, the metastore does not store partial + // partitions currently, we need to store dummy partitions + else { + try { + String ppath = dpCtx.getSPPath(); + ppath = ppath.substring(0, ppath.length() - 1); + DummyPartition p = + new DummyPartition(dest_tab, dest_tab.getDbName() + + "@" + dest_tab.getTableName() + "@" + ppath, + partSpec); + output = new WriteEntity(p, getWriteType(), false); + outputs.add(output); + } catch (HiveException e) { + throw new SemanticException(e.getMessage(), e); + } + } } + return output; + } - FileSinkOperator fso = (FileSinkOperator) output; - fso.getConf().setTable(dest_tab); - fsopToTable.put(fso, dest_tab); - // the following code is used to collect column stats when - // hive.stats.autogather=true - // and it is an insert overwrite or insert into table - if (dest_tab != null && conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) - && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) - && ColumnStatsAutoGatherContext.canRunAutogatherStats(fso)) { - if (dest_type.intValue() == QBMetaData.DEST_TABLE) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, partSpec, input, qb.getParseInfo() - .isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); - } else if (dest_type.intValue() == QBMetaData.DEST_PARTITION) { - genAutoColumnStatsGatheringPipeline(qb, table_desc, dest_part.getSpec(), input, qb - 
.getParseInfo().isInsertIntoTable(dest_tab.getDbName(), dest_tab.getTableName())); + private void checkExternalTable(Table dest_tab) throws SemanticException { + if ((!conf.getBoolVar(HiveConf.ConfVars.HIVE_INSERT_INTO_EXTERNAL_TABLES)) && + (dest_tab.getTableType().equals(TableType.EXTERNAL_TABLE))) { + throw new SemanticException( + ErrorMsg.INSERT_EXTERNAL_TABLE.getMsg(dest_tab.getTableName())); + } + } + private void checkImmutableTable(QB qb, Table dest_tab, Path dest_path, boolean isPart) + throws SemanticException { + // If the query here is an INSERT_INTO and the target is an immutable table, + // verify that our destination is empty before proceeding + if (!dest_tab.isImmutable() || !qb.getParseInfo().isInsertIntoTable( + dest_tab.getDbName(), dest_tab.getTableName())) { + return; + } + try { + FileSystem fs = dest_path.getFileSystem(conf); + if (! MetaStoreUtils.isDirEmpty(fs,dest_path)){ + LOG.warn("Attempted write into an immutable table : " + + dest_tab.getTableName() + " : " + dest_path); + throw new SemanticException( + ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(dest_tab.getTableName())); } + } catch (IOException ioe) { + LOG.warn("Error while trying to determine if immutable table " + + (isPart ? 
"partition " : "") + "has any data : " + dest_tab.getTableName() + + " : " + dest_path); + throw new SemanticException(ErrorMsg.INSERT_INTO_IMMUTABLE_TABLE.getMsg(ioe.getMessage())); } - return output; } + private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, + Map partSpec, String dest) throws SemanticException { + List parts = dest_tab.getPartitionKeys(); + if (parts == null || parts.isEmpty()) return null; // table is not partitioned + if (partSpec == null || partSpec.size() == 0) { // user did NOT specify partition + throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), + ErrorMsg.NEED_PARTITION_ERROR.getMsg())); + } + DynamicPartitionCtx dpCtx = qbm.getDPCtx(dest); + if (dpCtx == null) { + dest_tab.validatePartColumnNames(partSpec, false); + dpCtx = new DynamicPartitionCtx(dest_tab, partSpec, + conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + qbm.setDPCtx(dest, dpCtx); + } + + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP + throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), + ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); + } + if ((dest_tab.getNumBuckets() > 0)) { + dpCtx.setNumBuckets(dest_tab.getNumBuckets()); + } + return dpCtx; + } + + private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc, Map partSpec, Operator curr, boolean isInsertInto) throws SemanticException { String tableName = table_desc.getTableName(); @@ -7094,7 +7157,7 @@ String fixCtasColumnName(String colName) { // This method assumes you have already decided that this is an Acid write. Don't call it if // that isn't true. 
private void checkAcidConstraints(QB qb, TableDesc tableDesc, - Table table) throws SemanticException { + Table table, AcidUtils.Operation acidOp) throws SemanticException { String tableName = tableDesc.getTableName(); if (!qb.getParseInfo().isInsertIntoTable(tableName)) { LOG.debug("Couldn't find table " + tableName + " in insertIntoTable"); @@ -7111,15 +7174,14 @@ These props are now enabled elsewhere (see commit diffs). It would be better in */ conf.set(AcidUtils.CONF_ACID_KEY, "true"); - if (table.getNumBuckets() < 1) { - throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName()); - } - if (table.getSortCols() != null && table.getSortCols().size() > 0) { - throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); + if (!Operation.NOT_ACID.equals(acidOp) && !Operation.INSERT_ONLY.equals(acidOp)) { + if (table.getNumBuckets() < 1) { + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName()); + } + if (table.getSortCols() != null && table.getSortCols().size() > 0) { + throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName()); + } } - - - } /** @@ -10117,7 +10179,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String if (partitions != null) { for (Partition partn : partitions) { // inputs.add(new ReadEntity(partn)); // is this needed at all? 
- LOG.info("XXX: adding part: "+partn); + LOG.info("XXX: adding part: "+partn); outputs.add(new WriteEntity(partn, WriteEntity.WriteType.DDL_NO_LOCK)); } } @@ -11781,7 +11843,7 @@ ASTNode analyzeCreateTable( } } - if(location != null && location.length() != 0) { + if (location != null && location.length() != 0) { Path locPath = new Path(location); FileSystem curFs = null; FileStatus locStats = null; @@ -11790,7 +11852,7 @@ ASTNode analyzeCreateTable( if(curFs != null) { locStats = curFs.getFileStatus(locPath); } - if(locStats != null && locStats.isDir()) { + if (locStats != null && locStats.isDir()) { FileStatus[] lStats = curFs.listStatus(locPath); if(lStats != null && lStats.length != 0) { throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location)); @@ -11807,14 +11869,13 @@ ASTNode analyzeCreateTable( } tblProps = addDefaultProperties(tblProps); - tableDesc = new CreateTableDesc(qualifiedTabName[0], dbDotTab, isExt, isTemporary, cols, partCols, bucketCols, sortCols, numBuckets, rowFormatParams.fieldDelim, rowFormatParams.fieldEscape, rowFormatParams.collItemDelim, rowFormatParams.mapKeyDelim, rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, - skewedColNames, skewedValues, true, primaryKeys, foreignKeys); + skewedColNames, skewedValues, true, primaryKeys, foreignKeys); tableDesc.setMaterialization(isMaterialization); tableDesc.setStoredAsSubDirectories(storedAsDirs); tableDesc.setNullFormat(rowFormatParams.nullFormat); @@ -13075,9 +13136,11 @@ private AcidUtils.Operation getAcidType() { AcidUtils.Operation.INSERT); } - private AcidUtils.Operation getAcidType(Class of) { + private AcidUtils.Operation getAcidType(Table table, Class of) { if (SessionState.get() == null || !SessionState.get().getTxnMgr().supportsAcid()) { return AcidUtils.Operation.NOT_ACID; + } else if 
(AcidUtils.isInsertOnlyTable(table)) { + return AcidUtils.Operation.INSERT_ONLY; } else if (isAcidOutputFormat(of)) { return getAcidType(); } else { @@ -13152,4 +13215,12 @@ public void setLoadFileWork(List loadFileWork) { this.loadFileWork = loadFileWork; } + private static final class ColsAndTypes { + public ColsAndTypes(String cols, String colTypes) { + this.cols = cols; + this.colTypes = colTypes; + } + public String cols; + public String colTypes; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 97cf58521403..9b2f005679da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -34,7 +34,9 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.HiveStatsUtils; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.ql.Context; @@ -62,6 +64,7 @@ import org.apache.hadoop.hive.ql.plan.CreateViewDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; import org.apache.hadoop.hive.ql.plan.MoveWork; @@ -223,45 +226,15 @@ public void compile(final ParseContext pCtx, final List> leafTasks = new LinkedHashSet>(); - getLeafTasks(rootTasks, leafTasks); - if (isCStats) { - genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0); - } else { - for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx - .getColumnStatsAutoGatherContexts()) { - if 
(!columnStatsAutoGatherContext.isInsertInto()) { - genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), - columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0); - } else { - int numBitVector; - try { - numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); - } catch (Exception e) { - throw new SemanticException(e.getMessage()); - } - genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), - columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector); - } - } - } + createColumnStatsTasks(pCtx, rootTasks, loadFileWork, isCStats, outerQueryLimit); } decideExecMode(rootTasks, ctx, globalLimitCtx); @@ -355,6 +307,80 @@ public void compile(final ParseContext pCtx, final List> rootTasks, + List loadFileWork, boolean isCStats, int outerQueryLimit) + throws SemanticException { + Set> leafTasks = new LinkedHashSet>(); + getLeafTasks(rootTasks, leafTasks); + if (isCStats) { + genColumnStatsTask(pCtx.getAnalyzeRewrite(), loadFileWork, leafTasks, outerQueryLimit, 0); + } else { + for (ColumnStatsAutoGatherContext columnStatsAutoGatherContext : pCtx + .getColumnStatsAutoGatherContexts()) { + if (!columnStatsAutoGatherContext.isInsertInto()) { + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, 0); + } else { + int numBitVector; + try { + numBitVector = HiveStatsUtils.getNumBitVectorsForNDVEstimation(conf); + } catch (Exception e) { + throw new SemanticException(e.getMessage()); + } + genColumnStatsTask(columnStatsAutoGatherContext.getAnalyzeRewrite(), + columnStatsAutoGatherContext.getLoadFileWork(), leafTasks, outerQueryLimit, numBitVector); + } + } + } + } + + private Path getDefaultCtasLocation(final ParseContext pCtx) throws SemanticException { + try { + String protoName = null; + if (pCtx.getQueryProperties().isCTAS()) { + protoName = pCtx.getCreateTable().getTableName(); + } else 
if (pCtx.getQueryProperties().isMaterializedView()) { + protoName = pCtx.getCreateViewDesc().getViewName(); + } + String[] names = Utilities.getDbTableName(protoName); + if (!db.databaseExists(names[0])) { + throw new SemanticException("ERROR: The database " + names[0] + " does not exist."); + } + Warehouse wh = new Warehouse(conf); + return wh.getTablePath(db.getDatabase(names[0]), names[1]); + } catch (HiveException e) { + throw new SemanticException(e); + } catch (MetaException e) { + throw new SemanticException(e); + } + } + private void patchUpAfterCTASorMaterializedView(final List> rootTasks, final HashSet outputs, Task createTask) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java index 02896ff6a61e..26f1d7055b71 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckCtx.java @@ -160,7 +160,7 @@ public void setError(String error, ASTNode errorSrcNode) { if (LOG.isDebugEnabled()) { // Logger the callstack from which the error has been set. LOG.debug("Setting error: [" + error + "] from " - + ((errorSrcNode == null) ? "null" : errorSrcNode.toStringTree()), new Exception()); + + ((errorSrcNode == null) ? 
"null" : errorSrcNode.toStringTree())/*, new Exception()*/); } this.error = error; this.errorSrcNode = errorSrcNode; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index 68b0ad9ea63f..4635f185f123 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.ql.exec.Task; +import org.apache.hadoop.hive.ql.exec.Utilities; /** * Conditional task resolution interface. This is invoked at run time to get the @@ -74,14 +75,6 @@ public String getDir() { return dir; } - /** - * @param dir - * the dir to set - */ - public void setDir(String dir) { - this.dir = dir; - } - /** * @return the listTasks */ @@ -120,8 +113,7 @@ public void setLbCtx(ListBucketingCtx lbCtx) { } } - public List> getTasks(HiveConf conf, - Object objCtx) { + public List> getTasks(HiveConf conf, Object objCtx) { ConditionalResolverMergeFilesCtx ctx = (ConditionalResolverMergeFilesCtx) objCtx; String dirName = ctx.getDir(); @@ -178,6 +170,8 @@ public List> getTasks(HiveConf conf, if(lbLevel == 0) { // static partition without list bucketing long totalSz = getMergeSize(inpFs, dirPath, avgConditionSize); + Utilities.LOG14535.info("merge resolve simple case - totalSz " + totalSz + " from " + dirPath); + if (totalSz >= 0) { // add the merge job setupMapRedWork(conf, work, trgtSize, totalSz); resTsks.add(mrTask); @@ -191,6 +185,7 @@ public List> getTasks(HiveConf conf, } } } else { + Utilities.LOG14535.info("Resolver returning movetask for " + dirPath); resTsks.add(mvTask); } } catch (IOException e) { @@ -233,6 +228,7 @@ private void generateActualTasks(HiveConf conf, List mrTask, Task mrAndMvTask, Path dirPath, FileSystem inpFs, 
ConditionalResolverMergeFilesCtx ctx, MapWork work, int dpLbLevel) throws IOException { + Utilities.LOG14535.info("generateActualTasks for " + dirPath); DynamicPartitionCtx dpCtx = ctx.getDPCtx(); // get list of dynamic partitions FileStatus[] status = HiveStatsUtils.getFileStatusRecurse(dirPath, dpLbLevel, inpFs); @@ -243,6 +239,7 @@ private void generateActualTasks(HiveConf conf, List 0) { + // Note: this path should be specific to concatenate; never executed in a select query. // modify the existing move task as it is already in the candidate running tasks // running the MoveTask and MR task in parallel may @@ -357,6 +358,7 @@ private AverageSize getAverageSize(FileSystem inpFs, Path dirPath) { long totalSz = 0; int numFiles = 0; for (FileStatus fStat : fStats) { + Utilities.LOG14535.info("Resolver looking at " + fStat.getPath()); if (fStat.isDir()) { AverageSize avgSzDir = getAverageSize(inpFs, fStat.getPath()); if (avgSzDir.getTotalSize() < 0) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java index 9a4e782af89f..2e484baaf976 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java @@ -33,6 +33,7 @@ public class CopyWork implements Serializable { private Path fromPath; private Path toPath; private boolean errorOnSrcEmpty; + private boolean isMm = false; public CopyWork() { } @@ -65,4 +66,12 @@ public boolean isErrorOnSrcEmpty() { return errorOnSrcEmpty; } + public void setIsSourceMm(boolean isMm) { + this.isMm = isMm; + } + + public boolean isSourceMm() { + return isMm ; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java index eafba2147640..4b452b6ca249 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java @@ -94,6 
+94,10 @@ public class CreateTableDesc extends DDLDesc implements Serializable { private boolean isCTAS = false; List primaryKeys; List foreignKeys; + private Long initialMmWriteId; // Initial MM write ID for CTAS and import. + // The FSOP configuration for the FSOP that is going to write initial data during ctas. + // This is not needed beyond compilation, so it is transient. + private transient FileSinkDesc writer; public CreateTableDesc() { } @@ -825,5 +829,23 @@ public Table toTable(HiveConf conf) throws HiveException { return tbl; } + public void setInitialMmWriteId(Long mmWriteId) { + this.initialMmWriteId = mmWriteId; + } + + public Long getInitialMmWriteId() { + return initialMmWriteId; + } + + + public FileSinkDesc getAndUnsetWriter() { + FileSinkDesc fsd = writer; + writer = null; + return fsd; + } + + public void setWriter(FileSinkDesc writer) { + this.writer = writer; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java index 7ec1bdd5e50c..615c63de1423 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileMergeDesc.java @@ -28,6 +28,7 @@ public class FileMergeDesc extends AbstractOperatorDesc { private int listBucketingDepth; private boolean hasDynamicPartitions; private boolean isListBucketingAlterTableConcatenate; + private Long mmWriteId; public FileMergeDesc(DynamicPartitionCtx dynPartCtx, Path outputDir) { this.dpCtx = dynPartCtx; @@ -73,4 +74,12 @@ public boolean isListBucketingAlterTableConcatenate() { public void setListBucketingAlterTableConcatenate(boolean isListBucketingAlterTableConcatenate) { this.isListBucketingAlterTableConcatenate = isListBucketingAlterTableConcatenate; } + + public Long getMmWriteId() { + return mmWriteId; + } + + public void setMmWriteId(Long mmWriteId) { + this.mmWriteId = mmWriteId; + } } diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java index 07ed4fd5937f..1f84531ff43f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java @@ -22,6 +22,7 @@ import java.util.List; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ValidWriteIds; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -81,7 +82,6 @@ public enum DPSortState { // the sub-queries write to sub-directories of a common directory. So, the file sink // descriptors for subq1 and subq2 are linked. private boolean linkedFileSink = false; - private Path parentDir; transient private List linkedFileSinkDesc; private boolean statsReliable; @@ -96,6 +96,8 @@ public enum DPSortState { private transient Table table; private Path destPath; private boolean isHiveServerQuery; + private Long mmWriteId; + private boolean isMerge; public FileSinkDesc() { } @@ -106,7 +108,8 @@ public FileSinkDesc() { public FileSinkDesc(final Path dirName, final TableDesc tableInfo, final boolean compressed, final int destTableId, final boolean multiFileSpray, final boolean canBeMerged, final int numFiles, final int totalFiles, - final ArrayList partitionCols, final DynamicPartitionCtx dpCtx, Path destPath) { + final ArrayList partitionCols, final DynamicPartitionCtx dpCtx, Path destPath, + Long mmWriteId) { this.dirName = dirName; this.tableInfo = tableInfo; @@ -120,6 +123,7 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, this.dpCtx = dpCtx; this.dpSortState = DPSortState.NONE; this.destPath = destPath; + this.mmWriteId = mmWriteId; } public FileSinkDesc(final Path dirName, final TableDesc tableInfo, @@ -141,20 +145,20 @@ public FileSinkDesc(final Path dirName, final TableDesc tableInfo, public Object clone() throws 
CloneNotSupportedException { FileSinkDesc ret = new FileSinkDesc(dirName, tableInfo, compressed, destTableId, multiFileSpray, canBeMerged, numFiles, totalFiles, - partitionCols, dpCtx, destPath); + partitionCols, dpCtx, destPath, mmWriteId); ret.setCompressCodec(compressCodec); ret.setCompressType(compressType); ret.setGatherStats(gatherStats); ret.setStaticSpec(staticSpec); ret.setStatsAggPrefix(statsKeyPref); ret.setLinkedFileSink(linkedFileSink); - ret.setParentDir(parentDir); ret.setLinkedFileSinkDesc(linkedFileSinkDesc); ret.setStatsReliable(statsReliable); ret.setDpSortState(dpSortState); ret.setWriteType(writeType); ret.setTransactionId(txnId); ret.setStatsTmpDir(statsTmpDir); + ret.setIsMerge(isMerge); return ret; } @@ -176,7 +180,14 @@ public void setDirName(final Path dirName) { } public Path getFinalDirName() { - return linkedFileSink ? parentDir : dirName; + return linkedFileSink ? dirName.getParent() : dirName; + } + + /** getFinalDirName that takes into account MM, but not DP, LB or buckets. 
*/ + public Path getMergeInputDirName() { + Path root = getFinalDirName(); + if (mmWriteId == null) return root; + return new Path(root, ValidWriteIds.getMmFilePrefix(mmWriteId)); } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -248,6 +259,14 @@ public void setTemporary(boolean temporary) { this.temporary = temporary; } + public boolean isMmTable() { + return mmWriteId != null; + } + + public Long getMmWriteId() { + return mmWriteId; + } + public boolean isMaterialization() { return materialization; } @@ -376,11 +395,7 @@ public void setLinkedFileSink(boolean linkedFileSink) { } public Path getParentDir() { - return parentDir; - } - - public void setParentDir(Path parentDir) { - this.parentDir = parentDir; + return dirName.getParent(); } public boolean isStatsReliable() { @@ -474,4 +489,15 @@ public void setStatsTmpDir(String statsCollectionTempDir) { this.statsTmpDir = statsCollectionTempDir; } + public void setMmWriteId(Long mmWriteId) { + this.mmWriteId = mmWriteId; + } + + public void setIsMerge(boolean b) { + this.isMerge = b; + } + + public boolean isMerge() { + return isMerge; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java index bcd3125ab4ad..d46f71eddc70 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java @@ -42,5 +42,8 @@ public LoadDesc(final Path sourcePath) { public Path getSourcePath() { return sourcePath; } - + + public void setSourcePath(Path path) { + this.sourcePath = path; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 9a868a04ce93..072148c70c09 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -22,6 +22,7 @@ import 
org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.PTFUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; /** * LoadFileDesc. @@ -59,6 +60,7 @@ public LoadFileDesc(final Path sourcePath, final Path targetDir, final boolean isDfsDir, final String columns, final String columnTypes) { super(sourcePath); + Utilities.LOG14535.info("creating LFD from " + sourcePath + " to " + targetDir); this.targetDir = targetDir; this.isDfsDir = isDfsDir; this.columns = columns; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java index 771a919ccd0b..7039f1f37568 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java @@ -23,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -41,18 +42,21 @@ public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc // Need to remember whether this is an acid compliant operation, and if so whether it is an // insert, update, or delete. 
private AcidUtils.Operation writeType; + private Long mmWriteId; // TODO: the below seems like they should just be combined into partitionDesc private org.apache.hadoop.hive.ql.plan.TableDesc table; private Map partitionSpec; // NOTE: this partitionSpec has to be ordered map + private boolean commitMmWriteId = true; - public LoadTableDesc(final Path sourcePath, + private LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - final AcidUtils.Operation writeType) { + final AcidUtils.Operation writeType, Long mmWriteId) { super(sourcePath); - init(table, partitionSpec, replace, writeType); + Utilities.LOG14535.info("creating part LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); + init(table, partitionSpec, replace, writeType, mmWriteId); } /** @@ -65,15 +69,16 @@ public LoadTableDesc(final Path sourcePath, public LoadTableDesc(final Path sourcePath, final TableDesc table, final Map partitionSpec, - final boolean replace) { - this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID); + final boolean replace, + final Long mmWriteId) { + this(sourcePath, table, partitionSpec, replace, AcidUtils.Operation.NOT_ACID, mmWriteId); } public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, - final AcidUtils.Operation writeType) { - this(sourcePath, table, partitionSpec, true, writeType); + final AcidUtils.Operation writeType, Long mmWriteId) { + this(sourcePath, table, partitionSpec, true, writeType, mmWriteId); } /** @@ -84,20 +89,22 @@ public LoadTableDesc(final Path sourcePath, */ public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, - final Map partitionSpec) { - this(sourcePath, table, partitionSpec, true, AcidUtils.Operation.NOT_ACID); + final Map partitionSpec, Long mmWriteId) { + this(sourcePath, table, partitionSpec, true, 
AcidUtils.Operation.NOT_ACID, mmWriteId); } public LoadTableDesc(final Path sourcePath, final org.apache.hadoop.hive.ql.plan.TableDesc table, final DynamicPartitionCtx dpCtx, - final AcidUtils.Operation writeType) { + final AcidUtils.Operation writeType, + boolean isReplace, Long mmWriteId) { super(sourcePath); + Utilities.LOG14535.info("creating LTD from " + sourcePath + " to " + table.getTableName()/*, new Exception()*/); this.dpCtx = dpCtx; if (dpCtx != null && dpCtx.getPartSpec() != null && partitionSpec == null) { - init(table, dpCtx.getPartSpec(), true, writeType); + init(table, dpCtx.getPartSpec(), isReplace, writeType, mmWriteId); } else { - init(table, new LinkedHashMap(), true, writeType); + init(table, new LinkedHashMap(), isReplace, writeType, mmWriteId); } } @@ -105,11 +112,12 @@ private void init( final org.apache.hadoop.hive.ql.plan.TableDesc table, final Map partitionSpec, final boolean replace, - AcidUtils.Operation writeType) { + AcidUtils.Operation writeType, Long mmWriteId) { this.table = table; this.partitionSpec = partitionSpec; this.replace = replace; this.writeType = writeType; + this.mmWriteId = mmWriteId; } @Explain(displayName = "table", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -135,6 +143,15 @@ public boolean getReplace() { return replace; } + @Explain(displayName = "micromanaged table") + public Boolean isMmTableExplain() { + return mmWriteId != null? 
true : null; + } + + public boolean isMmTable() { + return mmWriteId != null; + } + public void setReplace(boolean replace) { this.replace = replace; } @@ -172,4 +189,16 @@ public void setLbCtx(ListBucketingCtx lbCtx) { public AcidUtils.Operation getWriteType() { return writeType; } + + public Long getMmWriteId() { + return mmWriteId; + } + + public void setIntermediateInMmWrite(boolean b) { + this.commitMmWriteId = !b; + } + + public boolean isCommitMmWrite() { + return commitMmWriteId; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java index 5cc36635d1e4..1be4d84e95ee 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java @@ -54,6 +54,7 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.mapred.JobConf; +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.Interner; /** @@ -375,6 +376,7 @@ public void setNumMapTasks(Integer numMapTasks) { } @SuppressWarnings("nls") + @VisibleForTesting public void addMapWork(Path path, String alias, Operator work, PartitionDesc pd) { ArrayList curAliases = pathToAliases.get(path); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java index 9f498c7fb88a..f0b2775579b3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java @@ -23,6 +23,7 @@ import java.util.List; import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.plan.Explain.Level; @@ -59,7 +60,7 @@ public class MoveWork implements Serializable { public MoveWork() { } - public MoveWork(HashSet inputs, HashSet outputs) 
{ + private MoveWork(HashSet inputs, HashSet outputs) { this.inputs = inputs; this.outputs = outputs; } @@ -68,6 +69,8 @@ public MoveWork(HashSet inputs, HashSet outputs, final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, boolean checkFileFormat, boolean srcLocal) { this(inputs, outputs); + Utilities.LOG14535.info("Creating MoveWork " + System.identityHashCode(this) + + " with " + loadTableWork + "; " + loadFileWork); this.loadTableWork = loadTableWork; this.loadFileWork = loadFileWork; this.checkFileFormat = checkFileFormat; @@ -77,10 +80,7 @@ public MoveWork(HashSet inputs, HashSet outputs, public MoveWork(HashSet inputs, HashSet outputs, final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, boolean checkFileFormat) { - this(inputs, outputs); - this.loadTableWork = loadTableWork; - this.loadFileWork = loadFileWork; - this.checkFileFormat = checkFileFormat; + this(inputs, outputs, loadTableWork, loadFileWork, checkFileFormat, false); } @Explain(displayName = "tables", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index b15ad34a0ca5..a07237e787cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -421,20 +421,6 @@ public static TableDesc getTableDesc(CreateViewDesc crtViewDesc, String cols, St return ret; } - /** - * Generate the table descriptor of MetadataTypedColumnsetSerDe with the - * separatorCode. MetaDataTypedColumnsetSerDe is used because LazySimpleSerDe - * does not support a table with a single column "col" with type - * "array". 
- */ - public static TableDesc getDefaultTableDesc(String separatorCode) { - return new TableDesc( - TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities - .makeProperties( - org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT,separatorCode, - serdeConstants.SERIALIZATION_LIB,MetadataTypedColumnsetSerDe.class.getName())); - } - /** * Generate the table descriptor for reduce key. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java index 1da8e911b606..0a611f979b58 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java @@ -147,8 +147,7 @@ public String getSerdeClassName() { @Explain(displayName = "name", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public String getTableName() { - return properties - .getProperty(hive_metastoreConstants.META_TABLE_NAME); + return properties.getProperty(hive_metastoreConstants.META_TABLE_NAME); } @Explain(displayName = "input format") diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index 4d6e24ef3821..92d9f28c0f18 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -65,7 +65,6 @@ public void setHiveConf(HiveConf conf) { @Override public void setThreadId(int threadId) { this.threadId = threadId; - } @Override diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index d3c361189a4a..066d2b6d9a7e 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -146,7 +146,7 @@ public class TestExecDriver extends TestCase { 
db.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, src, true, true); db.createTable(src, cols, null, TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class); - db.loadTable(hadoopDataFile[i], src, false, true, false, false, false); + db.loadTable(hadoopDataFile[i], src, false, true, false, false, false, null); i++; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index a8d7c9c461a2..909114c55a38 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -285,7 +285,7 @@ private FileSinkOperator getFileSink(AcidUtils.Operation writeType, partColMap.put(PARTCOL_NAME, null); DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(null, partColMap, "Sunday", 100); //todo: does this need the finalDestination? - desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null); + desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null, null); } else { desc = new FileSinkDesc(basePath, tableDesc, false); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java index a7ff9a374944..0d177bef5591 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java @@ -33,10 +33,10 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.io.AcidUtils.AcidOperationalProperties; import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFile; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockFileSystem; -import org.apache.hadoop.hive.ql.io.orc.TestInputOutputFormat.MockPath; import 
org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockFile; +import org.apache.hive.common.util.MockFileSystem.MockPath; import org.junit.Assert; import org.junit.Test; @@ -179,7 +179,7 @@ public void testOriginalDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_050_100/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delta_101_101/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals(null, dir.getBaseDirectory()); List obsolete = dir.getObsolete(); @@ -221,7 +221,7 @@ public void testBaseDeltas() throws Exception { new MockFile("mock:/tbl/part1/delta_050_105/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delta_90_120/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); @@ -517,7 +517,7 @@ public void testBaseWithDeleteDeltas() throws Exception { new MockFile("mock:/tbl/part1/delete_delta_050_105/bucket_0", 0, new byte[0]), new MockFile("mock:/tbl/part1/delete_delta_110_110/bucket_0", 0, new byte[0])); AcidUtils.Directory dir = - AcidUtils.getAcidState(new TestInputOutputFormat.MockPath(fs, + AcidUtils.getAcidState(new MockPath(fs, "mock:/tbl/part1"), conf, new ValidReadTxnList("100:" + Long.MAX_VALUE + ":")); assertEquals("mock:/tbl/part1/base_49", dir.getBaseDirectory().toString()); List obsolete = dir.getObsolete(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java 
b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 2c1bb6fe2f09..28a4f9db0130 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -21,10 +21,7 @@ import java.io.DataInput; import java.io.DataOutput; -import java.io.FileNotFoundException; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; import java.security.PrivilegedExceptionAction; import java.sql.Date; import java.sql.Timestamp; @@ -32,27 +29,16 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.TimeZone; -import java.util.TreeSet; import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BlockLocation; -import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FSInputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.conf.HiveConf; @@ -113,7 +99,11 @@ import org.apache.hadoop.mapred.RecordWriter; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Progressable; +import org.apache.hive.common.util.MockFileSystem; +import org.apache.hive.common.util.MockFileSystem.MockBlock; +import org.apache.hive.common.util.MockFileSystem.MockFile; +import org.apache.hive.common.util.MockFileSystem.MockOutputStream; 
+import org.apache.hive.common.util.MockFileSystem.MockPath; import org.apache.orc.OrcProto; import org.junit.Before; import org.junit.Rule; @@ -921,534 +911,6 @@ private List> createSplitStrategies( null, null, true); } - public static class MockBlock { - int offset; - int length; - final String[] hosts; - - public MockBlock(String... hosts) { - this.hosts = hosts; - } - - public void setOffset(int offset) { - this.offset = offset; - } - - public void setLength(int length) { - this.length = length; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("block{offset: "); - buffer.append(offset); - buffer.append(", length: "); - buffer.append(length); - buffer.append(", hosts: ["); - for(int i=0; i < hosts.length; i++) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(hosts[i]); - } - buffer.append("]}"); - return buffer.toString(); - } - } - - public static class MockFile { - final Path path; - int blockSize; - int length; - MockBlock[] blocks; - byte[] content; - - public MockFile(String path, int blockSize, byte[] content, - MockBlock... 
blocks) { - this.path = new Path(path); - this.blockSize = blockSize; - this.blocks = blocks; - this.content = content; - this.length = content.length; - int offset = 0; - for(MockBlock block: blocks) { - block.offset = offset; - block.length = Math.min(length - offset, blockSize); - offset += block.length; - } - } - - @Override - public int hashCode() { - return path.hashCode() + 31 * length; - } - - @Override - public boolean equals(final Object obj) { - if (!(obj instanceof MockFile)) { return false; } - return ((MockFile) obj).path.equals(this.path) && ((MockFile) obj).length == this.length; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("mockFile{path: "); - buffer.append(path.toString()); - buffer.append(", blkSize: "); - buffer.append(blockSize); - buffer.append(", len: "); - buffer.append(length); - buffer.append(", blocks: ["); - for(int i=0; i < blocks.length; i++) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(blocks[i]); - } - buffer.append("]}"); - return buffer.toString(); - } - } - - static class MockInputStream extends FSInputStream { - final MockFile file; - int offset = 0; - - public MockInputStream(MockFile file) throws IOException { - this.file = file; - } - - @Override - public void seek(long offset) throws IOException { - this.offset = (int) offset; - } - - @Override - public long getPos() throws IOException { - return offset; - } - - @Override - public boolean seekToNewSource(long l) throws IOException { - return false; - } - - @Override - public int read() throws IOException { - if (offset < file.length) { - return file.content[offset++] & 0xff; - } - return -1; - } - } - - public static class MockPath extends Path { - private final FileSystem fs; - public MockPath(FileSystem fs, String path) { - super(path); - this.fs = fs; - } - @Override - public FileSystem getFileSystem(Configuration conf) { - return fs; - } - } - - public static class MockOutputStream extends 
FSDataOutputStream { - private final MockFile file; - - public MockOutputStream(MockFile file) throws IOException { - super(new DataOutputBuffer(), null); - this.file = file; - } - - /** - * Set the blocks and their location for the file. - * Must be called after the stream is closed or the block length will be - * wrong. - * @param blocks the list of blocks - */ - public void setBlocks(MockBlock... blocks) { - file.blocks = blocks; - int offset = 0; - int i = 0; - while (offset < file.length && i < blocks.length) { - blocks[i].offset = offset; - blocks[i].length = Math.min(file.length - offset, file.blockSize); - offset += blocks[i].length; - i += 1; - } - } - - @Override - public void close() throws IOException { - super.close(); - DataOutputBuffer buf = (DataOutputBuffer) getWrappedStream(); - file.length = buf.getLength(); - file.content = new byte[file.length]; - MockBlock block = new MockBlock("host1"); - block.setLength(file.length); - setBlocks(block); - System.arraycopy(buf.getData(), 0, file.content, 0, file.length); - } - - @Override - public String toString() { - return "Out stream to " + file.toString(); - } - } - - public static class MockFileSystem extends FileSystem { - final List files = new ArrayList(); - final Map fileStatusMap = new HashMap<>(); - Path workingDir = new Path("/"); - // statics for when the mock fs is created via FileSystem.get - private static String blockedUgi = null; - private final static List globalFiles = new ArrayList(); - protected Statistics statistics; - - public MockFileSystem() { - // empty - } - - @Override - public void initialize(URI uri, Configuration conf) { - setConf(conf); - statistics = getStatistics("mock", getClass()); - } - - public MockFileSystem(Configuration conf, MockFile... 
files) { - setConf(conf); - this.files.addAll(Arrays.asList(files)); - statistics = getStatistics("mock", getClass()); - } - - public static void setBlockedUgi(String s) { - blockedUgi = s; - } - - void clear() { - files.clear(); - } - - @Override - public URI getUri() { - try { - return new URI("mock:///"); - } catch (URISyntaxException err) { - throw new IllegalArgumentException("huh?", err); - } - } - - // increments file modification time - public void touch(MockFile file) { - if (fileStatusMap.containsKey(file)) { - FileStatus fileStatus = fileStatusMap.get(file); - FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), - fileStatus.getReplication(), fileStatus.getBlockSize(), - fileStatus.getModificationTime() + 1, fileStatus.getAccessTime(), - fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), - fileStatus.getPath()); - fileStatusMap.put(file, fileStatusNew); - } - } - - @SuppressWarnings("serial") - public static class MockAccessDenied extends IOException { - } - - @Override - public FSDataInputStream open(Path path, int i) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - MockFile file = findFile(path); - if (file != null) return new FSDataInputStream(new MockInputStream(file)); - throw new IOException("File not found: " + path); - } - - private MockFile findFile(Path path) { - for (MockFile file: files) { - if (file.path.equals(path)) { - return file; - } - } - for (MockFile file: globalFiles) { - if (file.path.equals(path)) { - return file; - } - } - return null; - } - - private void checkAccess() throws IOException { - if (blockedUgi == null) return; - if (!blockedUgi.equals(UserGroupInformation.getCurrentUser().getShortUserName())) return; - throw new MockAccessDenied(); - } - - @Override - public FSDataOutputStream create(Path path, FsPermission fsPermission, - boolean overwrite, int bufferSize, - short replication, long blockSize, - Progressable progressable - ) throws 
IOException { - statistics.incrementWriteOps(1); - checkAccess(); - MockFile file = findFile(path); - if (file == null) { - file = new MockFile(path.toString(), (int) blockSize, new byte[0]); - files.add(file); - } - return new MockOutputStream(file); - } - - @Override - public FSDataOutputStream append(Path path, int bufferSize, - Progressable progressable - ) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return create(path, FsPermission.getDefault(), true, bufferSize, - (short) 3, 256 * 1024, progressable); - } - - @Override - public boolean rename(Path path, Path path2) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public boolean delete(Path path) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public boolean delete(Path path, boolean b) throws IOException { - statistics.incrementWriteOps(1); - checkAccess(); - return false; - } - - @Override - public RemoteIterator listLocatedStatus(final Path f) - throws IOException { - return new RemoteIterator() { - private Iterator iterator = listLocatedFileStatuses(f).iterator(); - - @Override - public boolean hasNext() throws IOException { - return iterator.hasNext(); - } - - @Override - public LocatedFileStatus next() throws IOException { - return iterator.next(); - } - }; - } - - private List listLocatedFileStatuses(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - List result = new ArrayList<>(); - String pathname = path.toString(); - String pathnameAsDir = pathname + "/"; - Set dirs = new TreeSet(); - MockFile file = findFile(path); - if (file != null) { - result.add(createLocatedStatus(file)); - return result; - } - findMatchingLocatedFiles(files, pathnameAsDir, dirs, result); - findMatchingLocatedFiles(globalFiles, pathnameAsDir, dirs, result); - // for each directory add it once - for(String dir: 
dirs) { - result.add(createLocatedDirectory(new MockPath(this, pathnameAsDir + dir))); - } - return result; - } - - @Override - public FileStatus[] listStatus(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - List result = new ArrayList(); - String pathname = path.toString(); - String pathnameAsDir = pathname + "/"; - Set dirs = new TreeSet(); - MockFile file = findFile(path); - if (file != null) { - return new FileStatus[]{createStatus(file)}; - } - findMatchingFiles(files, pathnameAsDir, dirs, result); - findMatchingFiles(globalFiles, pathnameAsDir, dirs, result); - // for each directory add it once - for(String dir: dirs) { - result.add(createDirectory(new MockPath(this, pathnameAsDir + dir))); - } - return result.toArray(new FileStatus[result.size()]); - } - - private void findMatchingFiles( - List files, String pathnameAsDir, Set dirs, List result) { - for (MockFile file: files) { - String filename = file.path.toString(); - if (filename.startsWith(pathnameAsDir)) { - String tail = filename.substring(pathnameAsDir.length()); - int nextSlash = tail.indexOf('/'); - if (nextSlash > 0) { - dirs.add(tail.substring(0, nextSlash)); - } else { - result.add(createStatus(file)); - } - } - } - } - - private void findMatchingLocatedFiles( - List files, String pathnameAsDir, Set dirs, List result) - throws IOException { - for (MockFile file: files) { - String filename = file.path.toString(); - if (filename.startsWith(pathnameAsDir)) { - String tail = filename.substring(pathnameAsDir.length()); - int nextSlash = tail.indexOf('/'); - if (nextSlash > 0) { - dirs.add(tail.substring(0, nextSlash)); - } else { - result.add(createLocatedStatus(file)); - } - } - } - } - - @Override - public void setWorkingDirectory(Path path) { - workingDir = path; - } - - @Override - public Path getWorkingDirectory() { - return workingDir; - } - - @Override - public boolean mkdirs(Path path, FsPermission fsPermission) { - 
statistics.incrementWriteOps(1); - return false; - } - - private FileStatus createStatus(MockFile file) { - if (fileStatusMap.containsKey(file)) { - return fileStatusMap.get(file); - } - FileStatus fileStatus = new FileStatus(file.length, false, 1, file.blockSize, 0, 0, - FsPermission.createImmutable((short) 644), "owen", "group", - file.path); - fileStatusMap.put(file, fileStatus); - return fileStatus; - } - - private FileStatus createDirectory(Path dir) { - return new FileStatus(0, true, 0, 0, 0, 0, - FsPermission.createImmutable((short) 755), "owen", "group", dir); - } - - private LocatedFileStatus createLocatedStatus(MockFile file) throws IOException { - FileStatus fileStatus = createStatus(file); - return new LocatedFileStatus(fileStatus, - getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); - } - - private LocatedFileStatus createLocatedDirectory(Path dir) throws IOException { - FileStatus fileStatus = createDirectory(dir); - return new LocatedFileStatus(fileStatus, - getFileBlockLocationsImpl(fileStatus, 0, fileStatus.getLen(), false)); - } - - @Override - public FileStatus getFileStatus(Path path) throws IOException { - statistics.incrementReadOps(1); - checkAccess(); - path = path.makeQualified(this); - String pathnameAsDir = path.toString() + "/"; - MockFile file = findFile(path); - if (file != null) return createStatus(file); - for (MockFile dir : files) { - if (dir.path.toString().startsWith(pathnameAsDir)) { - return createDirectory(path); - } - } - for (MockFile dir : globalFiles) { - if (dir.path.toString().startsWith(pathnameAsDir)) { - return createDirectory(path); - } - } - throw new FileNotFoundException("File " + path + " does not exist"); - } - - @Override - public BlockLocation[] getFileBlockLocations(FileStatus stat, - long start, long len) throws IOException { - return getFileBlockLocationsImpl(stat, start, len, true); - } - - private BlockLocation[] getFileBlockLocationsImpl(final FileStatus stat, final long start, - 
final long len, - final boolean updateStats) throws IOException { - if (updateStats) { - statistics.incrementReadOps(1); - } - checkAccess(); - List result = new ArrayList(); - MockFile file = findFile(stat.getPath()); - if (file != null) { - for(MockBlock block: file.blocks) { - if (OrcInputFormat.SplitGenerator.getOverlap(block.offset, - block.length, start, len) > 0) { - String[] topology = new String[block.hosts.length]; - for(int i=0; i < topology.length; ++i) { - topology[i] = "/rack/ " + block.hosts[i]; - } - result.add(new BlockLocation(block.hosts, block.hosts, - topology, block.offset, block.length)); - } - } - return result.toArray(new BlockLocation[result.size()]); - } - return new BlockLocation[0]; - } - - @Override - public String toString() { - StringBuilder buffer = new StringBuilder(); - buffer.append("mockFs{files:["); - for(int i=0; i < files.size(); ++i) { - if (i != 0) { - buffer.append(", "); - } - buffer.append(files.get(i)); - } - buffer.append("]}"); - return buffer.toString(); - } - - public static void addGlobalFile(MockFile mockFile) { - globalFiles.add(mockFile); - } - - public static void clearGlobalFiles() { - globalFiles.clear(); - } - } - static void fill(DataOutputBuffer out, long length) throws IOException { for(int i=0; i < length; ++i) { out.write(0); @@ -2569,6 +2031,7 @@ public void testDoAs() throws Exception { conf.setClass("fs.mock.impl", MockFileSystem.class, FileSystem.class); String badUser = UserGroupInformation.getCurrentUser().getShortUserName() + "-foo"; MockFileSystem.setBlockedUgi(badUser); + // TODO: could we instead get FS from path here and add normal files for every UGI? MockFileSystem.clearGlobalFiles(); OrcInputFormat.Context.resetThreadPool(); // We need the size above to take effect. 
try { diff --git a/ql/src/test/queries/clientnegative/mm_concatenate.q b/ql/src/test/queries/clientnegative/mm_concatenate.q new file mode 100644 index 000000000000..c5807670e678 --- /dev/null +++ b/ql/src/test/queries/clientnegative/mm_concatenate.q @@ -0,0 +1,5 @@ +create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true'); + +insert into table concat_mm select key from src limit 10; + +alter table concat_mm concatenate; diff --git a/ql/src/test/queries/clientnegative/mm_truncate_cols.q b/ql/src/test/queries/clientnegative/mm_truncate_cols.q new file mode 100644 index 000000000000..178011827fa6 --- /dev/null +++ b/ql/src/test/queries/clientnegative/mm_truncate_cols.q @@ -0,0 +1,3 @@ +CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true'); + +TRUNCATE TABLE mm_table COLUMNS (value); diff --git a/ql/src/test/queries/clientpositive/mm_all.q b/ql/src/test/queries/clientpositive/mm_all.q new file mode 100644 index 000000000000..9d1bf8a9e96a --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_all.q @@ -0,0 +1,422 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set tez.grouping.min-size=1; +set tez.grouping.max-size=2; +set hive.exec.dynamic.partition.mode=nonstrict; + + +-- Force multiple writers when reading +drop table intermediate; +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; + + +drop table part_mm; +create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true'); +explain insert into table part_mm 
partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +insert into table part_mm partition(key_mm='456') select key from intermediate; +insert into table part_mm partition(key_mm='455') select key from intermediate; +select * from part_mm order by key, key_mm; +truncate table part_mm partition(key_mm='455'); +select * from part_mm order by key, key_mm; +truncate table part_mm; +select * from part_mm order by key, key_mm; +drop table part_mm; + +drop table simple_mm; +create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true'); +insert into table simple_mm select key from intermediate; +insert overwrite table simple_mm select key from intermediate; +select * from simple_mm order by key; +insert into table simple_mm select key from intermediate; +select * from simple_mm order by key; +truncate table simple_mm; +select * from simple_mm; +drop table simple_mm; + + +-- simple DP (no bucketing) +drop table dp_mm; + +set hive.exec.dynamic.partition.mode=nonstrict; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true'); + +insert into table dp_mm partition (key1='123', key2) select key, key from intermediate; + +select * from dp_mm order by key; + +drop table dp_mm; + + +-- union + +create table union_mm(id int) tblproperties ('hivecommit'='true'); +insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps; + +select * from union_mm order by id; + +insert into table union_mm +select p from +( +select key + 1 as p from intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate; + +select * from union_mm order by id; + +insert into table union_mm +SELECT p FROM 
+( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p; + + +select * from union_mm order by id; +drop table union_mm; + + +create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true'); +insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps; + +select * from partunion_mm order by id; +drop table partunion_mm; + + + +create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true'); + +insert into table skew_mm +select key, key, key from intermediate; + +select * from skew_mm order by k2; +drop table skew_mm; + + +create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true'); + +insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate; + + +select * from skew_dp_union_mm order by k2; +drop table skew_dp_union_mm; + + + +set hive.merge.orcfile.stripe.level=true; +set hive.merge.tezfiles=true; +set hive.merge.mapfiles=true; +set hive.merge.mapredfiles=true; + + +create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true'); + +insert into table merge0_mm select key from intermediate; +select * from merge0_mm; + +set tez.grouping.split-count=1; +insert into table merge0_mm select key from intermediate; +set tez.grouping.split-count=0; +select * from merge0_mm; + +drop table merge0_mm; + + +create table merge2_mm (id int) 
tblproperties('hivecommit'='true'); + +insert into table merge2_mm select key from intermediate; +select * from merge2_mm; + +set tez.grouping.split-count=1; +insert into table merge2_mm select key from intermediate; +set tez.grouping.split-count=0; +select * from merge2_mm; + +drop table merge2_mm; + + +create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true'); + +insert into table merge1_mm partition (key) select key, key from intermediate; +select * from merge1_mm; + +set tez.grouping.split-count=1; +insert into table merge1_mm partition (key) select key, key from intermediate; +set tez.grouping.split-count=0; +select * from merge1_mm; + +drop table merge1_mm; + +set hive.merge.tezfiles=false; +set hive.merge.mapfiles=false; +set hive.merge.mapredfiles=false; + +-- TODO: need to include merge+union+DP, but it's broken for now + + +drop table ctas0_mm; +create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate; +select * from ctas0_mm; +drop table ctas0_mm; + +drop table ctas1_mm; +create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate; +select * from ctas1_mm; +drop table ctas1_mm; + + + +drop table iow0_mm; +create table iow0_mm(key int) tblproperties('hivecommit'='true'); +insert overwrite table iow0_mm select key from intermediate; +insert into table iow0_mm select key + 1 from intermediate; +select * from iow0_mm order by key; +insert overwrite table iow0_mm select key + 2 from intermediate; +select * from iow0_mm order by key; +drop table iow0_mm; + + +drop table iow1_mm; +create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true'); +insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate; +insert into table iow1_mm partition (key2) +select key + 1 as k1, key from intermediate union all select key 
as k1, key from intermediate; +select * from iow1_mm order by key, key2; +insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate; +select * from iow1_mm order by key, key2; +insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate; +select * from iow1_mm order by key, key2; +drop table iow1_mm; + + + + +drop table load0_mm; +create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' into table load0_mm; +select count(1) from load0_mm; +load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm; +select count(1) from load0_mm; +drop table load0_mm; + + +drop table intermediate2; +create table intermediate2 (key string, value string) stored as textfile +location 'file:${system:test.tmp.dir}/intermediate2'; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; + +drop table load1_mm; +create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true'); +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' into table load1_mm; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv1.txt' into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' 
overwrite into table load1_mm; +select count(1) from load1_mm; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv2.txt' overwrite into table load1_mm; +select count(1) from load1_mm; +drop table load1_mm; + +drop table load2_mm; +create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true'); +load data local inpath '../../data/files/kv1.txt' into table intermediate2; +load data local inpath '../../data/files/kv2.txt' into table intermediate2; +load data local inpath '../../data/files/kv3.txt' into table intermediate2; +load data inpath 'file:${system:test.tmp.dir}/intermediate2/kv*.txt' into table load2_mm partition(k=5, l=5); +select count(1) from load2_mm; +drop table load2_mm; +drop table intermediate2; + + +drop table intermediate_nonpart; +drop table intermmediate_part; +drop table intermmediate_nonpart; +create table intermediate_nonpart(key int, p int); +insert into intermediate_nonpart select * from intermediate; +create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true'); +insert into intermmediate_nonpart select * from intermediate; +create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true'); +insert into table intermmediate partition(p) select key, p from intermediate; + +set hive.exim.test.mode=true; + +export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart'; +export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart'; +export table intermediate to 'ql/test/data/exports/intermediate_part'; +export table intermmediate to 'ql/test/data/exports/intermmediate_part'; + +drop table intermediate_nonpart; +drop table intermmediate_part; +drop table intermmediate_nonpart; + +-- non-MM export to MM table, with and without partitions + +drop table import0_mm; +create table import0_mm(key int, p 
int) tblproperties('hivecommit'='true'); +import table import0_mm from 'ql/test/data/exports/intermediate_nonpart'; +select * from import0_mm order by key, p; +drop table import0_mm; + + + +drop table import1_mm; +create table import1_mm(key int) partitioned by (p int) + stored as orc tblproperties('hivecommit'='true'); +import table import1_mm from 'ql/test/data/exports/intermediate_part'; +select * from import1_mm order by key, p; +drop table import1_mm; + + +-- MM export into new MM table, non-part and part + +drop table import2_mm; +import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart'; +desc import2_mm; +select * from import2_mm order by key, p; +drop table import2_mm; + +drop table import3_mm; +import table import3_mm from 'ql/test/data/exports/intermmediate_part'; +desc import3_mm; +select * from import3_mm order by key, p; +drop table import3_mm; + +-- MM export into existing MM table, non-part and partial part + +drop table import4_mm; +create table import4_mm(key int, p int) tblproperties('hivecommit'='true'); +import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart'; +select * from import4_mm order by key, p; +drop table import4_mm; + +drop table import5_mm; +create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true'); +import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part'; +select * from import5_mm order by key, p; +drop table import5_mm; + +-- MM export into existing non-MM table, non-part and part + +drop table import6_mm; +create table import6_mm(key int, p int); +import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart'; +select * from import6_mm order by key, p; +drop table import6_mm; + +drop table import7_mm; +create table import7_mm(key int) partitioned by (p int); +import table import7_mm from 'ql/test/data/exports/intermmediate_part'; +select * from import7_mm order by key, p; +drop table import7_mm; + +set 
hive.exim.test.mode=false; + + + +drop table multi0_1_mm; +drop table multi0_2_mm; +create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true'); +create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true'); + +from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key; + +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=true; +set hive.merge.sparkfiles=true; +set hive.merge.tezfiles=true; + +from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p; +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +drop table multi0_1_mm; +drop table multi0_2_mm; + + +drop table multi1_mm; +create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true'); +from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p; +select * from multi1_mm order by key, key2, p; +from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p; +select * from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; +drop table multi1_mm; + + + + + + + +drop table intermediate; diff --git a/ql/src/test/queries/clientpositive/mm_all2.q b/ql/src/test/queries/clientpositive/mm_all2.q 
new file mode 100644 index 000000000000..c4f305876957 --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_all2.q @@ -0,0 +1,64 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set tez.grouping.min-size=1; +set tez.grouping.max-size=2; +set hive.exec.dynamic.partition.mode=nonstrict; + + +-- Bucketing tests are slow and some tablesample ones don't work w/o MM + +-- Force multiple writers when reading +drop table intermediate; +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; +insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2; + + + +drop table bucket0_mm; +create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true'); +insert into table bucket0_mm select key, key from intermediate; +select * from bucket0_mm; +select * from bucket0_mm tablesample (bucket 1 out of 2) s; +select * from bucket0_mm tablesample (bucket 2 out of 2) s; +insert into table bucket0_mm select key, key from intermediate; +select * from bucket0_mm; +select * from bucket0_mm tablesample (bucket 1 out of 2) s; +select * from bucket0_mm tablesample (bucket 2 out of 2) s; +drop table bucket0_mm; + + +drop table bucket1_mm; +create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true'); +insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate; +select * from bucket1_mm; +select * from bucket1_mm tablesample (bucket 1 out of 2) s; +select * from bucket1_mm tablesample 
(bucket 2 out of 2) s; +drop table bucket1_mm; + + + +drop table bucket2_mm; +create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true'); +insert into table bucket2_mm select key, key from intermediate where key == 0; +select * from bucket2_mm; +select * from bucket2_mm tablesample (bucket 1 out of 10) s; +select * from bucket2_mm tablesample (bucket 4 out of 10) s; +insert into table bucket2_mm select key, key from intermediate where key in (0, 103); +select * from bucket2_mm; +select * from bucket2_mm tablesample (bucket 1 out of 10) s; +select * from bucket2_mm tablesample (bucket 4 out of 10) s; +drop table bucket2_mm; + +drop table intermediate; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/mm_current.q b/ql/src/test/queries/clientpositive/mm_current.q new file mode 100644 index 000000000000..bb166cfe1367 --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_current.q @@ -0,0 +1,71 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.fetch.task.conversion=none; +set tez.grouping.min-size=1; +set tez.grouping.max-size=2; +set hive.tez.auto.reducer.parallelism=false; + +drop table intermediate; +create table intermediate(key int) partitioned by (p int) stored as orc; +insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2; +insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2; + + +drop table multi0_1_mm; +drop table multi0_2_mm; +create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true'); +create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true'); + +from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key; + +select * from multi0_1_mm order by key, key2; +select * 
from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=true; +set hive.merge.sparkfiles=true; +set hive.merge.tezfiles=true; + +from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p; +select * from multi0_1_mm order by key, key2; +select * from multi0_2_mm order by key, key2; + +set hive.merge.mapredfiles=false; +set hive.merge.sparkfiles=false; +set hive.merge.tezfiles=false; + +drop table multi0_1_mm; +drop table multi0_2_mm; + + +drop table multi1_mm; +create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true'); +from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p; +select * from multi1_mm order by key, key2, p; +from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p; +select * from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; + +from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p; +select key, key2, p from multi1_mm order by key, key2, p; +drop table multi1_mm; + + + +drop table intermediate; + + diff --git a/ql/src/test/queries/clientpositive/mm_insertonly_acid.q b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q new file mode 100644 index 000000000000..7da99c522d16 --- /dev/null +++ b/ql/src/test/queries/clientpositive/mm_insertonly_acid.q @@ -0,0 +1,16 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.fetch.task.conversion=none; +set hive.exec.dynamic.partition.mode=nonstrict; +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + + 
+drop table qtr_acid; +create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only"); +insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10; +insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10; +explain +select * from qtr_acid order by key; +select * from qtr_acid order by key; +drop table qtr_acid; \ No newline at end of file diff --git a/ql/src/test/results/clientnegative/mm_concatenate.q.out b/ql/src/test/results/clientnegative/mm_concatenate.q.out new file mode 100644 index 000000000000..073640908799 --- /dev/null +++ b/ql/src/test/results/clientnegative/mm_concatenate.q.out @@ -0,0 +1,18 @@ +PREHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@concat_mm +POSTHOOK: query: create table concat_mm (id int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@concat_mm +PREHOOK: query: insert into table concat_mm select key from src limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@concat_mm +POSTHOOK: query: insert into table concat_mm select key from src limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@concat_mm +POSTHOOK: Lineage: concat_mm.id EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Merge is not supported for MM tables diff --git a/ql/src/test/results/clientnegative/mm_truncate_cols.q.out b/ql/src/test/results/clientnegative/mm_truncate_cols.q.out new file mode 100644 index 000000000000..015f251c8fc9 --- /dev/null +++ 
b/ql/src/test/results/clientnegative/mm_truncate_cols.q.out @@ -0,0 +1,9 @@ +PREHOOK: query: CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@mm_table +POSTHOOK: query: CREATE TABLE mm_table(key int, value string) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mm_table +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: Truncating MM table columns not presently supported diff --git a/ql/src/test/results/clientpositive/llap/mm_all.q.out b/ql/src/test/results/clientpositive/llap/mm_all.q.out new file mode 100644 index 000000000000..57c878ca0816 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_all.q.out @@ -0,0 +1,2977 @@ +PREHOOK: query: -- Force multiple writers when reading +drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Force multiple writers when reading +drop table intermediate +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key 
EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@part_mm +POSTHOOK: query: create table part_mm(key int) partitioned by (key_mm int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@part_mm +PREHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY 
+POSTHOOK: query: explain insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: intermediate + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.part_mm + Execution mode: llap + LLAP IO: all inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + partition: + key_mm 455 + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.part_mm + micromanaged table: true + + Stage: Stage-3 + Stats-Aggr Operator + +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: 
default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: query: insert into table part_mm partition(key_mm='456') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: insert into table part_mm partition(key_mm='455') select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Lineage: part_mm PARTITION(key_mm=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: type: 
QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +0 455 +0 455 +0 456 +10 455 +10 455 +10 456 +97 455 +97 455 +97 456 +98 455 +98 455 +98 456 +100 455 +100 455 +100 456 +103 455 +103 455 +103 456 +PREHOOK: query: truncate table part_mm partition(key_mm='455') +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: query: truncate table part_mm partition(key_mm='455') +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@part_mm@key_mm=455 +PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm order by key, key_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +0 456 +10 456 +97 456 +98 456 +100 456 +103 456 +PREHOOK: query: truncate table part_mm +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@part_mm@key_mm=455 +PREHOOK: Output: default@part_mm@key_mm=456 +POSTHOOK: query: truncate table part_mm +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@part_mm@key_mm=455 +POSTHOOK: Output: default@part_mm@key_mm=456 +PREHOOK: query: select * from part_mm order by key, key_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@part_mm +PREHOOK: Input: default@part_mm@key_mm=455 +PREHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from part_mm order by key, key_mm 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@part_mm +POSTHOOK: Input: default@part_mm@key_mm=455 +POSTHOOK: Input: default@part_mm@key_mm=456 +#### A masked pattern was here #### +PREHOOK: query: drop table part_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@part_mm +PREHOOK: Output: default@part_mm +POSTHOOK: query: drop table part_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@part_mm +POSTHOOK: Output: default@part_mm +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@simple_mm +POSTHOOK: query: create table simple_mm(key int) stored as orc tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@simple_mm +PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert overwrite table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: 
default@simple_mm +POSTHOOK: query: insert overwrite table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from simple_mm order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +0 +10 +97 +98 +100 +103 +PREHOOK: query: insert into table simple_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@simple_mm +POSTHOOK: query: insert into table simple_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@simple_mm +POSTHOOK: Lineage: simple_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from simple_mm order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +0 +0 +10 +10 +97 +97 +98 +98 +100 +100 +103 +103 +PREHOOK: query: truncate table simple_mm +PREHOOK: type: TRUNCATETABLE +PREHOOK: Output: default@simple_mm +POSTHOOK: query: truncate table 
simple_mm +POSTHOOK: type: TRUNCATETABLE +POSTHOOK: Output: default@simple_mm +PREHOOK: query: select * from simple_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@simple_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from simple_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@simple_mm +#### A masked pattern was here #### +PREHOOK: query: drop table simple_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@simple_mm +PREHOOK: Output: default@simple_mm +POSTHOOK: query: drop table simple_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@simple_mm +POSTHOOK: Output: default@simple_mm +PREHOOK: query: -- simple DP (no bucketing) +drop table dp_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- simple DP (no bucketing) +drop table dp_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@dp_mm +POSTHOOK: query: create table dp_mm (key int) partitioned by (key1 string, key2 int) stored as orc + tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@dp_mm +PREHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@dp_mm@key1=123 +POSTHOOK: query: insert into table dp_mm partition (key1='123', key2) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@dp_mm@key1=123/key2=0 +POSTHOOK: Output: 
default@dp_mm@key1=123/key2=10 +POSTHOOK: Output: default@dp_mm@key1=123/key2=100 +POSTHOOK: Output: default@dp_mm@key1=123/key2=103 +POSTHOOK: Output: default@dp_mm@key1=123/key2=97 +POSTHOOK: Output: default@dp_mm@key1=123/key2=98 +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=0).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=100).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=103).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=10).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=97).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: dp_mm PARTITION(key1=123,key2=98).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from dp_mm order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@dp_mm +PREHOOK: Input: default@dp_mm@key1=123/key2=0 +PREHOOK: Input: default@dp_mm@key1=123/key2=10 +PREHOOK: Input: default@dp_mm@key1=123/key2=100 +PREHOOK: Input: default@dp_mm@key1=123/key2=103 +PREHOOK: Input: default@dp_mm@key1=123/key2=97 +PREHOOK: Input: default@dp_mm@key1=123/key2=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from dp_mm order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@dp_mm +POSTHOOK: Input: default@dp_mm@key1=123/key2=0 +POSTHOOK: Input: default@dp_mm@key1=123/key2=10 +POSTHOOK: Input: default@dp_mm@key1=123/key2=100 +POSTHOOK: Input: default@dp_mm@key1=123/key2=103 +POSTHOOK: Input: default@dp_mm@key1=123/key2=97 +POSTHOOK: Input: default@dp_mm@key1=123/key2=98 +#### A masked pattern was here #### +0 123 0 +10 123 10 +97 123 97 +98 123 98 +100 
123 100 +103 123 103 +PREHOOK: query: drop table dp_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@dp_mm +PREHOOK: Output: default@dp_mm +POSTHOOK: query: drop table dp_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@dp_mm +POSTHOOK: Output: default@dp_mm +PREHOOK: query: -- union + +create table union_mm(id int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@union_mm +POSTHOOK: query: -- union + +create table union_mm(id int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@union_mm +PREHOOK: query: insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +select temps.p from ( +select key as p from intermediate +union all +select key + 1 as p from intermediate ) temps +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A masked pattern was here #### +0 +1 +10 +11 +97 +98 +98 +99 +100 +101 +103 +104 +PREHOOK: query: insert into table union_mm +select p from +( +select key + 1 as p from 
intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +select p from +( +select key + 1 as p from intermediate +union all +select key from intermediate +) tab group by p +union all +select key + 2 as p from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A masked pattern was here #### +0 +0 +1 +1 +2 +10 +10 +11 +11 +12 +97 +97 +98 +98 +98 +99 +99 +99 +100 +100 +100 +101 +101 +102 +103 +103 +104 +104 +105 +PREHOOK: query: insert into table union_mm +SELECT p FROM +( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@union_mm +POSTHOOK: query: insert into table union_mm +SELECT p FROM +( + SELECT key + 1 as p FROM intermediate + UNION ALL + SELECT 
key as p FROM ( + SELECT distinct key FROM ( + SELECT key FROM ( + SELECT key + 2 as key FROM intermediate + UNION ALL + SELECT key FROM intermediate + )t1 + group by key)t2 + )t3 +)t4 +group by p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@union_mm +POSTHOOK: Lineage: union_mm.id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from union_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@union_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from union_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@union_mm +#### A masked pattern was here #### +0 +0 +0 +1 +1 +1 +2 +2 +10 +10 +10 +11 +11 +11 +12 +12 +97 +97 +97 +98 +98 +98 +98 +99 +99 +99 +99 +100 +100 +100 +100 +101 +101 +101 +102 +102 +103 +103 +103 +104 +104 +104 +105 +105 +PREHOOK: query: drop table union_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@union_mm +PREHOOK: Output: default@union_mm +POSTHOOK: query: drop table union_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@union_mm +POSTHOOK: Output: default@union_mm +PREHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: create table partunion_mm(id int) partitioned by (key int) tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@partunion_mm +PREHOOK: query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: 
default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: insert into table partunion_mm partition(key) +select temps.* from ( +select key as p, key from intermediate +union all +select key + 1 as p, key + 1 from intermediate ) temps +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@partunion_mm@key=0 +POSTHOOK: Output: default@partunion_mm@key=1 +POSTHOOK: Output: default@partunion_mm@key=10 +POSTHOOK: Output: default@partunion_mm@key=100 +POSTHOOK: Output: default@partunion_mm@key=101 +POSTHOOK: Output: default@partunion_mm@key=103 +POSTHOOK: Output: default@partunion_mm@key=104 +POSTHOOK: Output: default@partunion_mm@key=11 +POSTHOOK: Output: default@partunion_mm@key=97 +POSTHOOK: Output: default@partunion_mm@key=98 +POSTHOOK: Output: default@partunion_mm@key=99 +POSTHOOK: Lineage: partunion_mm PARTITION(key=0).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=100).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=103).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=10).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=11).id EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: partunion_mm PARTITION(key=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from partunion_mm order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@partunion_mm +PREHOOK: Input: default@partunion_mm@key=0 +PREHOOK: Input: default@partunion_mm@key=1 +PREHOOK: Input: default@partunion_mm@key=10 +PREHOOK: Input: default@partunion_mm@key=100 +PREHOOK: Input: default@partunion_mm@key=101 +PREHOOK: Input: default@partunion_mm@key=103 +PREHOOK: Input: default@partunion_mm@key=104 +PREHOOK: Input: default@partunion_mm@key=11 +PREHOOK: Input: default@partunion_mm@key=97 +PREHOOK: Input: default@partunion_mm@key=98 +PREHOOK: Input: default@partunion_mm@key=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from partunion_mm order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Input: default@partunion_mm@key=0 +POSTHOOK: Input: default@partunion_mm@key=1 +POSTHOOK: Input: default@partunion_mm@key=10 +POSTHOOK: Input: default@partunion_mm@key=100 +POSTHOOK: Input: default@partunion_mm@key=101 +POSTHOOK: Input: default@partunion_mm@key=103 +POSTHOOK: Input: default@partunion_mm@key=104 +POSTHOOK: Input: default@partunion_mm@key=11 +POSTHOOK: Input: default@partunion_mm@key=97 +POSTHOOK: Input: default@partunion_mm@key=98 +POSTHOOK: Input: default@partunion_mm@key=99 +#### A masked pattern was here #### +0 0 +1 1 +10 10 +11 11 +97 97 +98 98 +98 
98 +99 99 +100 100 +101 101 +103 103 +104 104 +PREHOOK: query: drop table partunion_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@partunion_mm +PREHOOK: Output: default@partunion_mm +POSTHOOK: query: drop table partunion_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@partunion_mm +POSTHOOK: Output: default@partunion_mm +PREHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@skew_mm +POSTHOOK: query: create table skew_mm(k1 int, k2 int, k4 int) skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) + stored as directories tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@skew_mm +PREHOOK: query: insert into table skew_mm +select key, key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@skew_mm +POSTHOOK: query: insert into table skew_mm +select key, key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@skew_mm +POSTHOOK: Lineage: skew_mm.k1 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_mm.k4 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_mm order by k2 +PREHOOK: type: QUERY +PREHOOK: Input: default@skew_mm +#### A masked pattern was here #### +POSTHOOK: 
query: select * from skew_mm order by k2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@skew_mm +#### A masked pattern was here #### +0 0 0 +10 10 10 +97 97 97 +98 98 98 +100 100 100 +103 103 103 +PREHOOK: query: drop table skew_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@skew_mm +PREHOOK: Output: default@skew_mm +POSTHOOK: query: drop table skew_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@skew_mm +POSTHOOK: Output: default@skew_mm +PREHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: create table skew_dp_union_mm(k1 int, k2 int, k4 int) partitioned by (k3 int) +skewed by (k1, k4) on ((0,0),(1,1),(2,2),(3,3)) stored as directories tblproperties ('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@skew_dp_union_mm +PREHOOK: query: insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: insert into table skew_dp_union_mm partition (k3) +select key as i, key as j, key as k, key as l from intermediate +union all +select key +1 as i, key +2 as j, key +3 as k, key +4 as l from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: 
default@skew_dp_union_mm@k3=0 +POSTHOOK: Output: default@skew_dp_union_mm@k3=10 +POSTHOOK: Output: default@skew_dp_union_mm@k3=100 +POSTHOOK: Output: default@skew_dp_union_mm@k3=101 +POSTHOOK: Output: default@skew_dp_union_mm@k3=102 +POSTHOOK: Output: default@skew_dp_union_mm@k3=103 +POSTHOOK: Output: default@skew_dp_union_mm@k3=104 +POSTHOOK: Output: default@skew_dp_union_mm@k3=107 +POSTHOOK: Output: default@skew_dp_union_mm@k3=14 +POSTHOOK: Output: default@skew_dp_union_mm@k3=4 +POSTHOOK: Output: default@skew_dp_union_mm@k3=97 +POSTHOOK: Output: default@skew_dp_union_mm@k3=98 +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=0).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=100).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=101).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k1 EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=102).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=103).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=104).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=107).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=10).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: 
skew_dp_union_mm PARTITION(k3=10).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=14).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=4).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=97).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k1 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k2 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: skew_dp_union_mm PARTITION(k3=98).k4 EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from skew_dp_union_mm order by k2 +PREHOOK: type: QUERY +PREHOOK: Input: default@skew_dp_union_mm +PREHOOK: 
Input: default@skew_dp_union_mm@k3=0 +PREHOOK: Input: default@skew_dp_union_mm@k3=10 +PREHOOK: Input: default@skew_dp_union_mm@k3=100 +PREHOOK: Input: default@skew_dp_union_mm@k3=101 +PREHOOK: Input: default@skew_dp_union_mm@k3=102 +PREHOOK: Input: default@skew_dp_union_mm@k3=103 +PREHOOK: Input: default@skew_dp_union_mm@k3=104 +PREHOOK: Input: default@skew_dp_union_mm@k3=107 +PREHOOK: Input: default@skew_dp_union_mm@k3=14 +PREHOOK: Input: default@skew_dp_union_mm@k3=4 +PREHOOK: Input: default@skew_dp_union_mm@k3=97 +PREHOOK: Input: default@skew_dp_union_mm@k3=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from skew_dp_union_mm order by k2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@skew_dp_union_mm +POSTHOOK: Input: default@skew_dp_union_mm@k3=0 +POSTHOOK: Input: default@skew_dp_union_mm@k3=10 +POSTHOOK: Input: default@skew_dp_union_mm@k3=100 +POSTHOOK: Input: default@skew_dp_union_mm@k3=101 +POSTHOOK: Input: default@skew_dp_union_mm@k3=102 +POSTHOOK: Input: default@skew_dp_union_mm@k3=103 +POSTHOOK: Input: default@skew_dp_union_mm@k3=104 +POSTHOOK: Input: default@skew_dp_union_mm@k3=107 +POSTHOOK: Input: default@skew_dp_union_mm@k3=14 +POSTHOOK: Input: default@skew_dp_union_mm@k3=4 +POSTHOOK: Input: default@skew_dp_union_mm@k3=97 +POSTHOOK: Input: default@skew_dp_union_mm@k3=98 +#### A masked pattern was here #### +0 0 0 0 +1 2 3 4 +10 10 10 10 +11 12 13 14 +97 97 97 97 +98 98 98 98 +98 99 100 101 +99 100 101 102 +100 100 100 100 +101 102 103 104 +103 103 103 103 +104 105 106 107 +PREHOOK: query: drop table skew_dp_union_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@skew_dp_union_mm +PREHOOK: Output: default@skew_dp_union_mm +POSTHOOK: query: drop table skew_dp_union_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@skew_dp_union_mm +POSTHOOK: Output: default@skew_dp_union_mm +PREHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: 
database:default +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: create table merge0_mm (id int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge0_mm +PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge0_mm +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +PREHOOK: query: insert into table merge0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: insert into table merge0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge0_mm +POSTHOOK: Lineage: merge0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, 
comment:null), ] +PREHOOK: query: select * from merge0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge0_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +98 +97 +100 +103 +0 +10 +PREHOOK: query: drop table merge0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge0_mm +PREHOOK: Output: default@merge0_mm +POSTHOOK: query: drop table merge0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge0_mm +POSTHOOK: Output: default@merge0_mm +PREHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: create table merge2_mm (id int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge2_mm +PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge2_mm +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +98 +97 +100 
+103 +0 +10 +PREHOOK: query: insert into table merge2_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: insert into table merge2_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge2_mm +POSTHOOK: Lineage: merge2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from merge2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge2_mm +#### A masked pattern was here #### +98 +97 +100 +103 +0 +10 +98 +97 +100 +103 +0 +10 +PREHOOK: query: drop table merge2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge2_mm +PREHOOK: Output: default@merge2_mm +POSTHOOK: query: drop table merge2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge2_mm +POSTHOOK: Output: default@merge2_mm +PREHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: create table merge1_mm (id int) partitioned by (key int) stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@merge1_mm +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: 
Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge1_mm@key=0 +POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=100 +POSTHOOK: Output: default@merge1_mm@key=103 +POSTHOOK: Output: default@merge1_mm@key=97 +POSTHOOK: Output: default@merge1_mm@key=98 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from merge1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge1_mm +PREHOOK: Input: default@merge1_mm@key=0 +PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=100 +PREHOOK: Input: default@merge1_mm@key=103 +PREHOOK: Input: default@merge1_mm@key=97 +PREHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from merge1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Input: 
default@merge1_mm@key=0 +POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=100 +POSTHOOK: Input: default@merge1_mm@key=103 +POSTHOOK: Input: default@merge1_mm@key=97 +POSTHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +100 100 +103 103 +97 97 +98 98 +0 0 +10 10 +PREHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: insert into table merge1_mm partition (key) select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@merge1_mm@key=0 +POSTHOOK: Output: default@merge1_mm@key=10 +POSTHOOK: Output: default@merge1_mm@key=100 +POSTHOOK: Output: default@merge1_mm@key=103 +POSTHOOK: Output: default@merge1_mm@key=97 +POSTHOOK: Output: default@merge1_mm@key=98 +POSTHOOK: Lineage: merge1_mm PARTITION(key=0).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=100).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=103).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=10).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=97).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: merge1_mm PARTITION(key=98).id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, 
comment:null), ] +PREHOOK: query: select * from merge1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@merge1_mm +PREHOOK: Input: default@merge1_mm@key=0 +PREHOOK: Input: default@merge1_mm@key=10 +PREHOOK: Input: default@merge1_mm@key=100 +PREHOOK: Input: default@merge1_mm@key=103 +PREHOOK: Input: default@merge1_mm@key=97 +PREHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from merge1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Input: default@merge1_mm@key=0 +POSTHOOK: Input: default@merge1_mm@key=10 +POSTHOOK: Input: default@merge1_mm@key=100 +POSTHOOK: Input: default@merge1_mm@key=103 +POSTHOOK: Input: default@merge1_mm@key=97 +POSTHOOK: Input: default@merge1_mm@key=98 +#### A masked pattern was here #### +100 100 +100 100 +103 103 +103 103 +97 97 +97 97 +98 98 +98 98 +0 0 +0 0 +10 10 +10 10 +PREHOOK: query: drop table merge1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@merge1_mm +PREHOOK: Output: default@merge1_mm +POSTHOOK: query: drop table merge1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@merge1_mm +POSTHOOK: Output: default@merge1_mm +PREHOOK: query: -- TODO: need to include merge+union+DP, but it's broken for now + + +drop table ctas0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- TODO: need to include merge+union+DP, but it's broken for now + + +drop table ctas0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas0_mm +POSTHOOK: query: create table ctas0_mm tblproperties ('hivecommit'='true') as select * from intermediate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@intermediate 
+POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ctas0_mm +POSTHOOK: Lineage: ctas0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: ctas0_mm.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from ctas0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@ctas0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from ctas0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ctas0_mm +#### A masked pattern was here #### +98 455 +97 455 +100 457 +103 457 +0 456 +10 456 +PREHOOK: query: drop table ctas0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ctas0_mm +PREHOOK: Output: default@ctas0_mm +POSTHOOK: query: drop table ctas0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ctas0_mm +POSTHOOK: Output: default@ctas0_mm +PREHOOK: query: drop table ctas1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table ctas1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: database:default +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: create table ctas1_mm tblproperties ('hivecommit'='true') as + select * from intermediate union all select * from intermediate +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: database:default +POSTHOOK: Output: 
default@ctas1_mm +POSTHOOK: Lineage: ctas1_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: ctas1_mm.p EXPRESSION [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from ctas1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from ctas1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ctas1_mm +#### A masked pattern was here #### +98 455 +97 455 +100 457 +103 457 +0 456 +10 456 +98 455 +97 455 +100 457 +103 457 +0 456 +10 456 +PREHOOK: query: drop table ctas1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@ctas1_mm +PREHOOK: Output: default@ctas1_mm +POSTHOOK: query: drop table ctas1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@ctas1_mm +POSTHOOK: Output: default@ctas1_mm +PREHOOK: query: drop table iow0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table iow0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: create table iow0_mm(key int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@iow0_mm +PREHOOK: query: insert overwrite table iow0_mm select key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert overwrite table iow0_mm select key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm 
+POSTHOOK: Lineage: iow0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table iow0_mm select key + 1 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert into table iow0_mm select key + 1 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow0_mm order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from iow0_mm order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +0 +1 +10 +11 +97 +98 +98 +99 +100 +101 +103 +104 +PREHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: insert overwrite table iow0_mm select key + 2 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow0_mm +POSTHOOK: Lineage: iow0_mm.key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow0_mm order by key +PREHOOK: type: 
QUERY +PREHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from iow0_mm order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow0_mm +#### A masked pattern was here #### +2 +12 +99 +100 +102 +105 +PREHOOK: query: drop table iow0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@iow0_mm +PREHOOK: Output: default@iow0_mm +POSTHOOK: query: drop table iow0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@iow0_mm +POSTHOOK: Output: default@iow0_mm +PREHOOK: query: drop table iow1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table iow1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: create table iow1_mm(key int) partitioned by (key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@iow1_mm +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key as k1, key from intermediate union all select key as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: 
default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: insert into table iow1_mm partition (key2) +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert into table iow1_mm partition (key2) +select key + 1 as k1, key from intermediate union all select key as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +POSTHOOK: query: select * from iow1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +0 0 +0 0 +0 0 +1 0 +10 10 +10 10 +10 10 +11 10 +97 97 +97 97 +97 97 +98 97 +98 98 +98 98 +98 98 +99 98 +100 100 +100 100 +100 100 +101 100 +103 103 +103 103 +103 103 +104 103 +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: 
default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key from intermediate union all select key + 4 as k1, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=0 +POSTHOOK: Output: default@iow1_mm@key2=10 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=97 +POSTHOOK: Output: default@iow1_mm@key2=98 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=0).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=10).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here 
#### +POSTHOOK: query: select * from iow1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +#### A masked pattern was here #### +3 0 +4 0 +13 10 +14 10 +100 97 +101 97 +101 98 +102 98 +103 100 +104 100 +106 103 +107 103 +PREHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: insert overwrite table iow1_mm partition (key2) +select key + 3 as k1, key + 3 from intermediate union all select key + 2 as k1, key + 2 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@iow1_mm@key2=100 +POSTHOOK: Output: default@iow1_mm@key2=101 +POSTHOOK: Output: default@iow1_mm@key2=102 +POSTHOOK: Output: default@iow1_mm@key2=103 +POSTHOOK: Output: default@iow1_mm@key2=105 +POSTHOOK: Output: default@iow1_mm@key2=106 +POSTHOOK: Output: default@iow1_mm@key2=12 +POSTHOOK: Output: default@iow1_mm@key2=13 +POSTHOOK: Output: default@iow1_mm@key2=2 +POSTHOOK: Output: default@iow1_mm@key2=3 +POSTHOOK: Output: default@iow1_mm@key2=99 +POSTHOOK: Lineage: iow1_mm PARTITION(key2=100).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=101).key EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=103).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=105).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=106).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=12).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=13).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=2).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=3).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: iow1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from iow1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@iow1_mm +PREHOOK: Input: default@iow1_mm@key2=0 +PREHOOK: Input: default@iow1_mm@key2=10 +PREHOOK: Input: default@iow1_mm@key2=100 +PREHOOK: Input: default@iow1_mm@key2=101 +PREHOOK: Input: default@iow1_mm@key2=102 +PREHOOK: Input: default@iow1_mm@key2=103 +PREHOOK: Input: default@iow1_mm@key2=105 +PREHOOK: Input: default@iow1_mm@key2=106 +PREHOOK: Input: default@iow1_mm@key2=12 +PREHOOK: Input: default@iow1_mm@key2=13 +PREHOOK: Input: default@iow1_mm@key2=2 +PREHOOK: Input: default@iow1_mm@key2=3 +PREHOOK: Input: default@iow1_mm@key2=97 +PREHOOK: Input: 
default@iow1_mm@key2=98 +PREHOOK: Input: default@iow1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from iow1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Input: default@iow1_mm@key2=0 +POSTHOOK: Input: default@iow1_mm@key2=10 +POSTHOOK: Input: default@iow1_mm@key2=100 +POSTHOOK: Input: default@iow1_mm@key2=101 +POSTHOOK: Input: default@iow1_mm@key2=102 +POSTHOOK: Input: default@iow1_mm@key2=103 +POSTHOOK: Input: default@iow1_mm@key2=105 +POSTHOOK: Input: default@iow1_mm@key2=106 +POSTHOOK: Input: default@iow1_mm@key2=12 +POSTHOOK: Input: default@iow1_mm@key2=13 +POSTHOOK: Input: default@iow1_mm@key2=2 +POSTHOOK: Input: default@iow1_mm@key2=3 +POSTHOOK: Input: default@iow1_mm@key2=97 +POSTHOOK: Input: default@iow1_mm@key2=98 +POSTHOOK: Input: default@iow1_mm@key2=99 +#### A masked pattern was here #### +2 2 +3 0 +3 3 +4 0 +12 12 +13 10 +13 13 +14 10 +99 99 +100 97 +100 100 +100 100 +101 97 +101 98 +101 101 +102 98 +102 102 +103 103 +105 105 +106 106 +PREHOOK: query: drop table iow1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@iow1_mm +PREHOOK: Output: default@iow1_mm +POSTHOOK: query: drop table iow1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@iow1_mm +POSTHOOK: Output: default@iow1_mm +PREHOOK: query: drop table load0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load0_mm +POSTHOOK: query: create table load0_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load0_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern 
was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load0_mm +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' overwrite into table load0_mm +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load0_mm +PREHOOK: query: select count(1) from load0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load0_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load0_mm +#### A masked pattern was here #### +500 +PREHOOK: query: drop table load0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load0_mm +PREHOOK: Output: default@load0_mm +POSTHOOK: query: drop 
table load0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load0_mm +POSTHOOK: Output: default@load0_mm +PREHOOK: query: drop table intermediate2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: create table intermediate2 (key string, value string) stored as textfile +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: drop table load1_mm +PREHOOK: type: 
DROPTABLE +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load1_mm +POSTHOOK: query: create table load1_mm (key string, value string) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load1_mm +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1000 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 
+POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +1050 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load1_mm +PREHOOK: query: select count(1) from load1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load1_mm +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load1_mm +#### A masked pattern was here #### +500 +PREHOOK: query: drop table 
load1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load1_mm +PREHOOK: Output: default@load1_mm +POSTHOOK: query: drop table load1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load1_mm +POSTHOOK: Output: default@load1_mm +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@load2_mm +POSTHOOK: query: create table load2_mm (key string, value string) + partitioned by (k int, l int) stored as textfile tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@load2_mm +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv2.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: load data local inpath '../../data/files/kv3.txt' into table intermediate2 +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: 
default@intermediate2 +#### A masked pattern was here #### +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@load2_mm +#### A masked pattern was here #### +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@load2_mm +POSTHOOK: Output: default@load2_mm@k=5/l=5 +PREHOOK: query: select count(1) from load2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@load2_mm +PREHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from load2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@load2_mm +POSTHOOK: Input: default@load2_mm@k=5/l=5 +#### A masked pattern was here #### +1025 +PREHOOK: query: drop table load2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@load2_mm +PREHOOK: Output: default@load2_mm +POSTHOOK: query: drop table load2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@load2_mm +POSTHOOK: Output: default@load2_mm +PREHOOK: query: drop table intermediate2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate2 +PREHOOK: Output: default@intermediate2 +POSTHOOK: query: drop table intermediate2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate2 +POSTHOOK: Output: default@intermediate2 +PREHOOK: query: drop table intermediate_nonpart +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate_nonpart +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate_nonpart(key int, p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: create table intermediate_nonpart(key int, p int) +POSTHOOK: type: CREATETABLE 
+POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: insert into intermediate_nonpart select * from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: insert into intermediate_nonpart select * from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@intermediate_nonpart +POSTHOOK: Lineage: intermediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: create table intermmediate_nonpart(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: insert into intermmediate_nonpart select * from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: insert into intermmediate_nonpart select * from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: 
default@intermediate@p=457 +POSTHOOK: Output: default@intermmediate_nonpart +POSTHOOK: Lineage: intermmediate_nonpart.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate_nonpart.p SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermmediate +POSTHOOK: query: create table intermmediate(key int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermmediate +PREHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@intermmediate +POSTHOOK: query: insert into table intermmediate partition(p) select key, p from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@intermmediate@p=455 +POSTHOOK: Output: default@intermmediate@p=456 +POSTHOOK: Output: default@intermmediate@p=457 +POSTHOOK: Lineage: intermmediate PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: intermmediate PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: export table intermediate_nonpart to 
'ql/test/data/exports/intermediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate_nonpart to 'ql/test/data/exports/intermediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +POSTHOOK: query: export table intermmediate_nonpart to 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate_nonpart +#### A masked pattern was here #### +PREHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +#### A masked pattern was here #### +POSTHOOK: query: export table intermediate to 'ql/test/data/exports/intermediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +#### A masked pattern was here #### +PREHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: EXPORT +PREHOOK: Input: default@intermmediate@p=455 +PREHOOK: Input: default@intermmediate@p=456 +PREHOOK: Input: default@intermmediate@p=457 +#### A masked pattern was here #### +POSTHOOK: query: export table intermmediate to 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: EXPORT +POSTHOOK: Input: default@intermmediate@p=455 +POSTHOOK: Input: default@intermmediate@p=456 +POSTHOOK: Input: default@intermmediate@p=457 +#### A masked pattern was here #### +PREHOOK: query: drop table intermediate_nonpart +PREHOOK: type: DROPTABLE +PREHOOK: 
Input: default@intermediate_nonpart +PREHOOK: Output: default@intermediate_nonpart +POSTHOOK: query: drop table intermediate_nonpart +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate_nonpart +POSTHOOK: Output: default@intermediate_nonpart +PREHOOK: query: drop table intermmediate_part +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermmediate_part +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table intermmediate_nonpart +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermmediate_nonpart +PREHOOK: Output: default@intermmediate_nonpart +POSTHOOK: query: drop table intermmediate_nonpart +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermmediate_nonpart +POSTHOOK: Output: default@intermmediate_nonpart +PREHOOK: query: -- non-MM export to MM table, with and without partitions + +drop table import0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- non-MM export to MM table, with and without partitions + +drop table import0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import0_mm +POSTHOOK: query: create table import0_mm(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import0_mm +PREHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import0_mm +POSTHOOK: query: import table import0_mm from 'ql/test/data/exports/intermediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import0_mm +PREHOOK: query: select * from import0_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import0_mm order by key, p +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@import0_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import0_mm +PREHOOK: Output: default@import0_mm +POSTHOOK: query: drop table import0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import0_mm +POSTHOOK: Output: default@import0_mm +PREHOOK: query: drop table import1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import1_mm(key int) partitioned by (p int) + stored as orc tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import1_mm +POSTHOOK: query: create table import1_mm(key int) partitioned by (p int) + stored as orc tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import1_mm +PREHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import1_mm +POSTHOOK: query: import table import1_mm from 'ql/test/data/exports/intermediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import1_mm +POSTHOOK: Output: default@import1_mm@p=455 +POSTHOOK: Output: default@import1_mm@p=456 +POSTHOOK: Output: default@import1_mm@p=457 +PREHOOK: query: select * from import1_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import1_mm +PREHOOK: Input: default@import1_mm@p=455 +PREHOOK: Input: default@import1_mm@p=456 +PREHOOK: Input: default@import1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from import1_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import1_mm +POSTHOOK: Input: default@import1_mm@p=455 +POSTHOOK: Input: default@import1_mm@p=456 +POSTHOOK: Input: 
default@import1_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import1_mm +PREHOOK: Output: default@import1_mm +POSTHOOK: query: drop table import1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import1_mm +POSTHOOK: Output: default@import1_mm +PREHOOK: query: -- MM export into new MM table, non-part and part + +drop table import2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into new MM table, non-part and part + +drop table import2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: database:default +POSTHOOK: query: import table import2_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import2_mm +PREHOOK: query: desc import2_mm +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@import2_mm +POSTHOOK: query: desc import2_mm +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@import2_mm +key int +p int +PREHOOK: query: select * from import2_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import2_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import2_mm +PREHOOK: Output: default@import2_mm +POSTHOOK: query: drop table import2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import2_mm +POSTHOOK: Output: default@import2_mm +PREHOOK: query: drop table import3_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import3_mm 
+POSTHOOK: type: DROPTABLE +PREHOOK: query: import table import3_mm from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: database:default +POSTHOOK: query: import table import3_mm from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import3_mm +POSTHOOK: Output: default@import3_mm@p=455 +POSTHOOK: Output: default@import3_mm@p=456 +POSTHOOK: Output: default@import3_mm@p=457 +PREHOOK: query: desc import3_mm +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@import3_mm +POSTHOOK: query: desc import3_mm +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@import3_mm +key int +p int + +# Partition Information +# col_name data_type comment + +p int +PREHOOK: query: select * from import3_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import3_mm +PREHOOK: Input: default@import3_mm@p=455 +PREHOOK: Input: default@import3_mm@p=456 +PREHOOK: Input: default@import3_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from import3_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import3_mm +POSTHOOK: Input: default@import3_mm@p=455 +POSTHOOK: Input: default@import3_mm@p=456 +POSTHOOK: Input: default@import3_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import3_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import3_mm +PREHOOK: Output: default@import3_mm +POSTHOOK: query: drop table import3_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import3_mm +POSTHOOK: Output: default@import3_mm +PREHOOK: query: -- MM export into existing MM table, non-part and partial part + +drop table import4_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into existing MM table, non-part and partial part + +drop table import4_mm +POSTHOOK: type: DROPTABLE 
+PREHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import4_mm +POSTHOOK: query: create table import4_mm(key int, p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import4_mm +PREHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import4_mm +POSTHOOK: query: import table import4_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import4_mm +PREHOOK: query: select * from import4_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import4_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import4_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import4_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import4_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import4_mm +PREHOOK: Output: default@import4_mm +POSTHOOK: query: drop table import4_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import4_mm +POSTHOOK: Output: default@import4_mm +PREHOOK: query: drop table import5_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import5_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import5_mm +POSTHOOK: query: create table import5_mm(key int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import5_mm +PREHOOK: query: import table import5_mm 
partition(p=455) from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import5_mm +POSTHOOK: query: import table import5_mm partition(p=455) from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import5_mm +POSTHOOK: Output: default@import5_mm@p=455 +PREHOOK: query: select * from import5_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import5_mm +PREHOOK: Input: default@import5_mm@p=455 +#### A masked pattern was here #### +POSTHOOK: query: select * from import5_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import5_mm +POSTHOOK: Input: default@import5_mm@p=455 +#### A masked pattern was here #### +97 455 +98 455 +PREHOOK: query: drop table import5_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import5_mm +PREHOOK: Output: default@import5_mm +POSTHOOK: query: drop table import5_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import5_mm +POSTHOOK: Output: default@import5_mm +PREHOOK: query: -- MM export into existing non-MM table, non-part and part + +drop table import6_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- MM export into existing non-MM table, non-part and part + +drop table import6_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import6_mm(key int, p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import6_mm +POSTHOOK: query: create table import6_mm(key int, p int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import6_mm +PREHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import6_mm +POSTHOOK: query: import table import6_mm from 'ql/test/data/exports/intermmediate_nonpart' +POSTHOOK: type: IMPORT +#### A masked 
pattern was here #### +POSTHOOK: Output: default@import6_mm +PREHOOK: query: select * from import6_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import6_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from import6_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import6_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import6_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import6_mm +PREHOOK: Output: default@import6_mm +POSTHOOK: query: drop table import6_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import6_mm +POSTHOOK: Output: default@import6_mm +PREHOOK: query: drop table import7_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table import7_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table import7_mm(key int) partitioned by (p int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@import7_mm +POSTHOOK: query: create table import7_mm(key int) partitioned by (p int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@import7_mm +PREHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +PREHOOK: type: IMPORT +#### A masked pattern was here #### +PREHOOK: Output: default@import7_mm +POSTHOOK: query: import table import7_mm from 'ql/test/data/exports/intermmediate_part' +POSTHOOK: type: IMPORT +#### A masked pattern was here #### +POSTHOOK: Output: default@import7_mm +POSTHOOK: Output: default@import7_mm@p=455 +POSTHOOK: Output: default@import7_mm@p=456 +POSTHOOK: Output: default@import7_mm@p=457 +PREHOOK: query: select * from import7_mm order by key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@import7_mm +PREHOOK: Input: default@import7_mm@p=455 +PREHOOK: Input: default@import7_mm@p=456 +PREHOOK: Input: default@import7_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select * from 
import7_mm order by key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@import7_mm +POSTHOOK: Input: default@import7_mm@p=455 +POSTHOOK: Input: default@import7_mm@p=456 +POSTHOOK: Input: default@import7_mm@p=457 +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table import7_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@import7_mm +PREHOOK: Output: default@import7_mm +POSTHOOK: query: drop table import7_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@import7_mm +POSTHOOK: Output: default@import7_mm +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: 
default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 +PREHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: 
default@intermediate@p=457 +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +455 97 +455 98 +456 0 +456 10 +457 100 +457 103 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +100 457 +103 457 +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_1_mm +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: 
default@multi0_1_mm +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_2_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi0_2_mm +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 2 +10 456 2 +97 455 2 +98 455 2 +100 457 2 +103 457 2 +455 97 1 +455 98 1 +456 0 1 +456 10 1 +457 100 1 +457 103 1 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE 
[(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 1 +0 456 2 +10 456 1 +10 456 2 +97 455 1 +97 455 2 +98 455 1 +98 455 2 +100 457 1 +100 457 2 +103 457 1 +103 457 2 +455 97 1 +455 97 2 +455 98 1 +455 98 2 +456 0 1 +456 0 2 +456 10 1 +456 10 2 +457 100 1 +457 100 2 +457 103 1 +457 103 2 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=455 +POSTHOOK: Output: default@multi1_mm@p=456 +POSTHOOK: Output: default@multi1_mm@p=457 
+POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=457).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +PREHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +POSTHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 2 +100 457 1 +100 457 1 +100 457 2 +103 457 1 +103 457 1 +103 457 
2 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 2 +456 10 456 +457 100 1 +457 100 2 +457 100 457 +457 103 1 +457 103 2 +457 103 457 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +PREHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +POSTHOOK: Input: default@multi1_mm@p=457 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 1 +98 455 2 +100 457 1 +100 457 1 +100 457 1 +100 457 2 +103 457 1 +103 457 1 +103 457 1 +103 457 2 +455 97 1 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 1 +456 10 2 +456 10 456 +457 100 1 +457 100 1 +457 100 2 +457 100 457 +457 103 1 +457 103 1 +457 103 2 +457 103 457 +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi1_mm +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_all2.q.out b/ql/src/test/results/clientpositive/llap/mm_all2.q.out new file mode 100644 index 000000000000..3921c7df640e --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_all2.q.out @@ -0,0 +1,495 @@ +PREHOOK: query: -- Bucketing tests are slow and some tablesample ones don't work w/o MM + +-- Force multiple writers when reading +drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: -- Bucketing tests are slow and some tablesample ones don't work w/o MM + +-- Force multiple writers when reading +drop table intermediate +POSTHOOK: type: DROPTABLE +PREHOOK: 
query: create table intermediate(key int) partitioned by (p int) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=457 +POSTHOOK: query: insert into table intermediate partition(p='457') select distinct key from src where key >= 100 order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@src +POSTHOOK: Output: default@intermediate@p=457 +POSTHOOK: Lineage: intermediate PARTITION(p=457).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table bucket0_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket0_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: create table bucket0_mm(key int, id int) +clustered by (key) into 2 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket0_mm +PREHOOK: query: insert into table bucket0_mm select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket0_mm +POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +98 98 +0 0 +10 10 +100 100 +97 97 +103 103 +PREHOOK: query: select * 
from bucket0_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +98 98 +0 0 +10 10 +100 100 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +97 97 +103 103 +PREHOOK: query: insert into table bucket0_mm select key, key from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: insert into table bucket0_mm select key, key from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket0_mm +POSTHOOK: Lineage: bucket0_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket0_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket0_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +98 98 +0 0 +10 10 +100 100 +97 97 +103 103 +98 98 +0 0 +10 10 +100 100 +97 97 +103 103 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 1 
out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +98 98 +0 0 +10 10 +100 100 +98 98 +0 0 +10 10 +100 100 +PREHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket0_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket0_mm +#### A masked pattern was here #### +97 97 +103 103 +97 97 +103 103 +PREHOOK: query: drop table bucket0_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket0_mm +PREHOOK: Output: default@bucket0_mm +POSTHOOK: query: drop table bucket0_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket0_mm +POSTHOOK: Output: default@bucket0_mm +PREHOOK: query: drop table bucket1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: create table bucket1_mm(key int, id int) partitioned by (key2 int) +clustered by (key) sorted by (key) into 2 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket1_mm +PREHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: 
Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: insert into table bucket1_mm partition (key2) +select key + 1, key, key - 1 from intermediate +union all +select key - 1, key, key + 1 from intermediate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket1_mm@key2=-1 +POSTHOOK: Output: default@bucket1_mm@key2=1 +POSTHOOK: Output: default@bucket1_mm@key2=101 +POSTHOOK: Output: default@bucket1_mm@key2=102 +POSTHOOK: Output: default@bucket1_mm@key2=104 +POSTHOOK: Output: default@bucket1_mm@key2=11 +POSTHOOK: Output: default@bucket1_mm@key2=9 +POSTHOOK: Output: default@bucket1_mm@key2=96 +POSTHOOK: Output: default@bucket1_mm@key2=97 +POSTHOOK: Output: default@bucket1_mm@key2=98 +POSTHOOK: Output: default@bucket1_mm@key2=99 +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=-1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=101).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=102).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=104).key 
EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=11).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=1).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=96).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=97).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=98).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=99).key EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).id EXPRESSION [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket1_mm PARTITION(key2=9).key EXPRESSION 
[(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket1_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +1 0 -1 +-1 0 1 +99 100 101 +104 103 102 +102 103 104 +9 10 11 +11 10 9 +98 97 96 +99 98 97 +96 97 98 +97 98 99 +101 100 99 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: 
Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 1 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +104 103 102 +102 103 104 +98 97 96 +96 97 98 +PREHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket1_mm +PREHOOK: Input: default@bucket1_mm@key2=-1 +PREHOOK: Input: default@bucket1_mm@key2=1 +PREHOOK: Input: default@bucket1_mm@key2=101 +PREHOOK: Input: default@bucket1_mm@key2=102 +PREHOOK: Input: default@bucket1_mm@key2=104 +PREHOOK: Input: default@bucket1_mm@key2=11 +PREHOOK: Input: default@bucket1_mm@key2=9 +PREHOOK: Input: default@bucket1_mm@key2=96 +PREHOOK: Input: default@bucket1_mm@key2=97 +PREHOOK: Input: default@bucket1_mm@key2=98 +PREHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket1_mm tablesample (bucket 2 out of 2) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Input: default@bucket1_mm@key2=-1 +POSTHOOK: Input: default@bucket1_mm@key2=1 +POSTHOOK: Input: default@bucket1_mm@key2=101 +POSTHOOK: Input: default@bucket1_mm@key2=102 +POSTHOOK: Input: default@bucket1_mm@key2=104 +POSTHOOK: Input: default@bucket1_mm@key2=11 +POSTHOOK: Input: default@bucket1_mm@key2=9 +POSTHOOK: Input: default@bucket1_mm@key2=96 +POSTHOOK: 
Input: default@bucket1_mm@key2=97 +POSTHOOK: Input: default@bucket1_mm@key2=98 +POSTHOOK: Input: default@bucket1_mm@key2=99 +#### A masked pattern was here #### +1 0 -1 +-1 0 1 +99 100 101 +9 10 11 +11 10 9 +99 98 97 +97 98 99 +101 100 99 +PREHOOK: query: drop table bucket1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket1_mm +PREHOOK: Output: default@bucket1_mm +POSTHOOK: query: drop table bucket1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket1_mm +POSTHOOK: Output: default@bucket1_mm +PREHOOK: query: drop table bucket2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table bucket2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: create table bucket2_mm(key int, id int) +clustered by (key) into 10 buckets +tblproperties('hivecommit'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket2_mm +PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0 +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key == 0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket2_mm +POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, 
comment:null), ] +PREHOOK: query: select * from bucket2_mm +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +PREHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103) +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Input: default@intermediate@p=457 +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: insert into table bucket2_mm select key, key from intermediate where key in (0, 103) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Input: default@intermediate@p=457 +POSTHOOK: Output: default@bucket2_mm +POSTHOOK: Lineage: bucket2_mm.id SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: bucket2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from bucket2_mm +PREHOOK: type: QUERY +PREHOOK: Input: 
default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +0 0 +103 103 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 1 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +0 0 +0 0 +PREHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket2_mm tablesample (bucket 4 out of 10) s +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket2_mm +#### A masked pattern was here #### +103 103 +PREHOOK: query: drop table bucket2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@bucket2_mm +PREHOOK: Output: default@bucket2_mm +POSTHOOK: query: drop table bucket2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@bucket2_mm +POSTHOOK: Output: default@bucket2_mm +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/llap/mm_current.q.out b/ql/src/test/results/clientpositive/llap/mm_current.q.out new file mode 100644 index 000000000000..0522288b31b7 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mm_current.q.out @@ -0,0 +1,420 @@ +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table intermediate(key int) partitioned by (p int) stored as 
orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@intermediate +POSTHOOK: query: create table intermediate(key int) partitioned by (p int) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@intermediate +PREHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=455 +POSTHOOK: query: insert into table intermediate partition(p='455') select distinct key from src where key >= 0 order by key desc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=455 +POSTHOOK: Lineage: intermediate PARTITION(p=455).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@intermediate@p=456 +POSTHOOK: query: insert into table intermediate partition(p='456') select distinct key from src where key is not null order by key asc limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@intermediate@p=456 +POSTHOOK: Lineage: intermediate PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: 
create table multi0_1_mm (key int, key2 int) tblproperties('hivecommit'='false') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: create table multi0_2_mm (key int, key2 int) tblproperties('hivecommit'='false') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert overwrite table multi0_1_mm select key, p +insert overwrite table multi0_2_mm select p, key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order 
by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +455 97 +455 98 +456 0 +456 10 +PREHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi0_1_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: from intermediate +insert into table multi0_1_mm select p, key +insert overwrite table multi0_2_mm select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi0_1_mm +POSTHOOK: Output: default@multi0_2_mm +POSTHOOK: Lineage: multi0_1_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_1_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi0_2_mm.key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi0_1_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_1_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_1_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_1_mm +#### A masked pattern was here 
#### +0 456 +10 456 +97 455 +98 455 +455 97 +455 98 +456 0 +456 10 +PREHOOK: query: select * from multi0_2_mm order by key, key2 +PREHOOK: type: QUERY +PREHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +POSTHOOK: query: select * from multi0_2_mm order by key, key2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi0_2_mm +#### A masked pattern was here #### +0 456 +10 456 +97 455 +98 455 +PREHOOK: query: drop table multi0_1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_1_mm +PREHOOK: Output: default@multi0_1_mm +POSTHOOK: query: drop table multi0_1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi0_1_mm +POSTHOOK: Output: default@multi0_1_mm +PREHOOK: query: drop table multi0_2_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi0_2_mm +PREHOOK: Output: default@multi0_2_mm +POSTHOOK: query: drop table multi0_2_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi0_2_mm +POSTHOOK: Output: default@multi0_2_mm +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: create table multi1_mm (key int, key2 int) partitioned by (p int) tblproperties('hivecommit'='false') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table 
multi1_mm partition(p=1) select p, key +insert into table multi1_mm partition(p=2) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 2 +10 456 2 +97 455 2 +98 455 2 +455 97 1 +455 98 1 +456 0 1 +456 10 1 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm@p=1 +PREHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p=2) select p, key +insert overwrite table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=2 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=2).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select * from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +POSTHOOK: query: select * from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +#### A masked pattern was here #### +0 456 1 +0 456 2 +10 456 1 +10 456 2 +97 455 1 +97 455 2 +98 455 1 +98 455 2 +455 97 1 +455 97 2 +455 98 1 +455 98 2 +456 0 1 +456 0 2 +456 10 1 +456 10 2 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, p +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 
+POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Output: default@multi1_mm@p=455 +POSTHOOK: Output: default@multi1_mm@p=456 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=455).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=456).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 2 +455 97 1 +455 97 2 +455 97 455 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 2 +456 10 456 +PREHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table 
multi1_mm partition(p=1) select key, p +PREHOOK: type: QUERY +PREHOOK: Input: default@intermediate +PREHOOK: Input: default@intermediate@p=455 +PREHOOK: Input: default@intermediate@p=456 +PREHOOK: Output: default@multi1_mm +PREHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: query: from intermediate +insert into table multi1_mm partition(p) select p, key, 1 +insert into table multi1_mm partition(p=1) select key, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@intermediate +POSTHOOK: Input: default@intermediate@p=455 +POSTHOOK: Input: default@intermediate@p=456 +POSTHOOK: Output: default@multi1_mm@p=1 +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: multi1_mm PARTITION(p=1).key2 SIMPLE [(intermediate)intermediate.FieldSchema(name:p, type:int, comment:null), ] +PREHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +PREHOOK: type: QUERY +PREHOOK: Input: default@multi1_mm +PREHOOK: Input: default@multi1_mm@p=1 +PREHOOK: Input: default@multi1_mm@p=2 +PREHOOK: Input: default@multi1_mm@p=455 +PREHOOK: Input: default@multi1_mm@p=456 +#### A masked pattern was here #### +POSTHOOK: query: select key, key2, p from multi1_mm order by key, key2, p +POSTHOOK: type: QUERY +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Input: default@multi1_mm@p=1 +POSTHOOK: Input: default@multi1_mm@p=2 +POSTHOOK: Input: default@multi1_mm@p=455 +POSTHOOK: Input: default@multi1_mm@p=456 +#### A masked pattern was here #### +0 456 1 +0 456 1 +0 456 1 +0 456 2 +10 456 1 +10 456 1 +10 456 1 +10 456 2 +97 455 1 +97 455 1 +97 455 1 +97 455 2 +98 455 1 +98 455 1 +98 455 1 +98 455 2 +455 97 1 +455 97 1 +455 97 2 +455 97 455 
+455 98 1 +455 98 1 +455 98 2 +455 98 455 +456 0 1 +456 0 1 +456 0 2 +456 0 456 +456 10 1 +456 10 1 +456 10 2 +456 10 456 +PREHOOK: query: drop table multi1_mm +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@multi1_mm +PREHOOK: Output: default@multi1_mm +POSTHOOK: query: drop table multi1_mm +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@multi1_mm +POSTHOOK: Output: default@multi1_mm +PREHOOK: query: drop table intermediate +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@intermediate +PREHOOK: Output: default@intermediate +POSTHOOK: query: drop table intermediate +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@intermediate +POSTHOOK: Output: default@intermediate diff --git a/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out b/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out new file mode 100644 index 000000000000..6f7d198b7aa8 --- /dev/null +++ b/ql/src/test/results/clientpositive/mm_insertonly_acid.q.out @@ -0,0 +1,115 @@ +PREHOOK: query: drop table qtr_acid +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table qtr_acid +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@qtr_acid +POSTHOOK: query: create table qtr_acid (key int) partitioned by (p int) tblproperties ("transactional"="true", "transactional_properties"="insert_only") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@qtr_acid +PREHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@qtr_acid@p=123 +POSTHOOK: query: insert into table qtr_acid partition(p='123') select distinct key from src where key > 0 order by key asc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: 
Input: default@src +POSTHOOK: Output: default@qtr_acid@p=123 +POSTHOOK: Lineage: qtr_acid PARTITION(p=123).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@qtr_acid@p=456 +POSTHOOK: query: insert into table qtr_acid partition(p='456') select distinct key from src where key > 0 order by key desc limit 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@qtr_acid@p=456 +POSTHOOK: Lineage: qtr_acid PARTITION(p=456).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +PREHOOK: query: explain +select * from qtr_acid order by key +PREHOOK: type: QUERY +POSTHOOK: query: explain +select * from qtr_acid order by key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: qtr_acid + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: int), p (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: int) + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 67 Basic stats: COMPLETE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from qtr_acid order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@qtr_acid +PREHOOK: Input: default@qtr_acid@p=123 +PREHOOK: Input: default@qtr_acid@p=456 +#### A masked pattern was here #### +POSTHOOK: query: select * from qtr_acid order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@qtr_acid +POSTHOOK: Input: default@qtr_acid@p=123 +POSTHOOK: Input: default@qtr_acid@p=456 +#### A masked pattern was here #### +9 456 +10 123 +11 123 +85 456 +86 456 +87 456 +90 456 +92 456 +95 456 +96 456 +97 456 +98 456 +100 123 +103 123 +104 123 +105 123 +111 123 +113 123 +114 123 +116 123 +PREHOOK: query: drop table qtr_acid +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@qtr_acid +PREHOOK: Output: default@qtr_acid +POSTHOOK: query: drop table qtr_acid +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@qtr_acid +POSTHOOK: Output: default@qtr_acid