From eec8d60cf40a39a241fc00d4d04cccd53d25e06c Mon Sep 17 00:00:00 2001 From: Sourabh Badhya Date: Tue, 3 Jan 2023 13:59:16 +0530 Subject: [PATCH 1/5] HIVE-11495: Add aborted reason to transaction information --- .../abort/AbortTransactionsOperation.java | 3 +- .../hive/ql/exec/repl/ReplDumpTask.java | 7 +- .../hive/ql/exec/repl/ReplLoadTask.java | 3 +- .../apache/hadoop/hive/ql/metadata/Hive.java | 4 +- .../hadoop/hive/ql/txn/compactor/Worker.java | 3 +- .../ql/util/HiveStrictManagedMigration.java | 5 +- .../thrift/gen-cpp/hive_metastore_types.cpp | 25 ++++ .../gen/thrift/gen-cpp/hive_metastore_types.h | 16 ++- .../hive/metastore/api/AbortTxnsRequest.java | 108 +++++++++++++++++- .../gen-php/metastore/AbortTxnsRequest.php | 24 ++++ .../thrift/gen-py/hive_metastore/ttypes.py | 14 ++- .../gen/thrift/gen-rb/hive_metastore_types.rb | 4 +- .../hive/metastore/HiveMetaStoreClient.java | 7 ++ .../hive/metastore/IMetaStoreClient.java | 9 ++ .../hive/metastore/hive_metastore.proto | 1 + .../src/main/thrift/hive_metastore.thrift | 1 + .../apache/hadoop/hive/metastore/Msck.java | 3 +- .../hive/metastore/txn/TxnErrorMsg.java | 83 ++++++++++++++ .../hadoop/hive/metastore/txn/TxnHandler.java | 35 ++++-- .../HiveMetaStoreClientPreCatalog.java | 7 ++ 20 files changed, 338 insertions(+), 24 deletions(-) create mode 100644 standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java index 8a02ba367c1e..8250ac7efe50 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/process/abort/AbortTransactionsOperation.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.ddl.process.abort; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.metadata.HiveException; @@ -32,7 +33,7 @@ public AbortTransactionsOperation(DDLOperationContext context, AbortTransactions @Override public int execute() throws HiveException { - context.getDb().abortTransactions(desc.getTransactionIds()); + context.getDb().abortTransactions(desc.getTransactionIds(), TxnErrorMsg.ABORT_QUERY.getErrorCode()); return 0; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 2996ab10ead4..e7c329a5f25f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hive.metastore.messaging.event.filters.CatalogFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter; import org.apache.hadoop.hive.metastore.messaging.event.filters.ReplEventFilter; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.SecurityUtils; import org.apache.hadoop.hive.metastore.utils.StringUtils; @@ -775,7 +776,7 @@ private void fetchFailoverMetadata(Hive hiveDb) throws HiveException, IOExceptio List txnsForDb = getOpenTxns(hiveTxnManager, hiveTxnManager.getValidTxns(excludedTxns), work.dbNameOrPattern); if (!txnsForDb.isEmpty()) { 
LOG.debug("Going to abort transactions: {} for database: {}.", txnsForDb, work.dbNameOrPattern); - hiveDb.abortTransactions(txnsForDb); + hiveDb.abortTransactions(txnsForDb, TxnErrorMsg.ABORT_FETCH_FAILOVER_METADATA.getErrorCode()); } fmd.setAbortedTxns(txnsForDb); fmd.setCursorPoint(currentNotificationId(hiveDb)); @@ -786,7 +787,7 @@ private void fetchFailoverMetadata(Hive hiveDb) throws HiveException, IOExceptio txnsForDb = getOpenTxns(hiveTxnManager, allValidTxns, work.dbNameOrPattern); if (!txnsForDb.isEmpty()) { LOG.debug("Going to abort transactions: {} for database: {}.", txnsForDb, work.dbNameOrPattern); - hiveDb.abortTransactions(txnsForDb); + hiveDb.abortTransactions(txnsForDb, TxnErrorMsg.ABORT_FETCH_FAILOVER_METADATA.getErrorCode()); fmd.addToAbortedTxns(txnsForDb); } fmd.setFailoverEventId(currentNotificationId(hiveDb)); @@ -1585,7 +1586,7 @@ String getValidTxnListForReplDump(Hive hiveDb, long waitUntilTime) throws HiveEx List openTxns = getOpenTxns(hiveTxnManager, validTxnList, work.dbNameOrPattern); if (!openTxns.isEmpty()) { //abort only write transactions for the db under replication if abort transactions is enabled. - hiveDb.abortTransactions(openTxns); + hiveDb.abortTransactions(openTxns, TxnErrorMsg.ABORT_WRITE_TXN_AFTER_TIMEOUT.getErrorCode()); validTxnList = hiveTxnManager.getValidTxns(excludedTxns); openTxns = getOpenTxns(hiveTxnManager, validTxnList, work.dbNameOrPattern); if (!openTxns.isEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 7be785a97ee2..e117e97bf1b6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.api.NotificationEvent; import org.apache.hadoop.hive.metastore.api.TxnType; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ddl.database.alter.owner.AlterDatabaseSetOwnerDesc; import org.apache.hadoop.hive.ql.ddl.privilege.PrincipalDesc; @@ -878,7 +879,7 @@ private void abortOpenTxnsForDatabase(HiveTxnManager hiveTxnManager, ValidTxnLis if (!openTxns.isEmpty()) { LOG.info("Rolling back write txns:" + openTxns.toString() + " for the database: " + dbName); //abort only write transactions for the current database if abort transactions is enabled. 
- hiveDb.abortTransactions(openTxns); + hiveDb.abortTransactions(openTxns, TxnErrorMsg.ABORT_ONGOING_TXN_FOR_TARGET_DB.getErrorCode()); validTxnList = hiveTxnManager.getValidTxns(excludedTxns); openTxns = ReplUtils.getOpenTxns(hiveTxnManager, validTxnList, dbName); if (!openTxns.isEmpty()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index d9301c3ea4bf..704e83b1d943 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -5940,9 +5940,9 @@ public GetOpenTxnsInfoResponse showTransactions() throws HiveException { } } - public void abortTransactions(List txnids) throws HiveException { + public void abortTransactions(List txnids, long errorCode) throws HiveException { try { - getMSC().abortTxns(txnids); + getMSC().abortTxns(txnids, errorCode); } catch (Exception e) { LOG.error("Failed abortTransactions", e); throw new HiveException(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index adadc415d4d7..c2f5aa87a349 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnStatus; import org.apache.hadoop.hive.ql.io.AcidDirectory; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -659,7 +660,7 @@ private void commit() throws TException { */ private void abort() throws TException { if (status == TxnStatus.OPEN) { - msc.abortTxns(Collections.singletonList(txnId)); + msc.abortTxns(Collections.singletonList(txnId), TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); status = TxnStatus.ABORTED; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 50220751d0cb..1eaa3c319acc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.HiveStrictManagedUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; @@ -1450,7 +1451,7 @@ private TxnCtx generateTxnCtxForAlter( result = new TxnCtx(writeId, validWriteIds, txnId); } finally { if (result == null) { - msc.abortTxns(Lists.newArrayList(txnId)); + msc.abortTxns(Lists.newArrayList(txnId), TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); } } return result; @@ -1466,7 +1467,7 @@ private void closeTxnCtx(TxnCtx txnCtx, IMetaStoreClient msc, boolean isOk) if (isOk) { msc.commitTxn(txnCtx.txnId); } else { - msc.abortTxns(Lists.newArrayList(txnCtx.txnId)); + msc.abortTxns(Lists.newArrayList(txnCtx.txnId), TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); } } catch (TException ex) { throw new HiveException(ex); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index a482833ebec9..d9e511cf16f7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -22027,6 +22027,11 @@ AbortTxnsRequest::~AbortTxnsRequest() noexcept { void AbortTxnsRequest::__set_txn_ids(const std::vector & val) { this->txn_ids = val; } + +void AbortTxnsRequest::__set_errorCode(const int64_t val) { + this->errorCode = val; +__isset.errorCode = true; +} std::ostream& operator<<(std::ostream& out, const AbortTxnsRequest& obj) { obj.printTo(out); @@ -22076,6 +22081,14 @@ uint32_t AbortTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->errorCode); + this->__isset.errorCode = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22107,6 +22120,11 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c } xfer += oprot->writeFieldEnd(); + if (this->__isset.errorCode) { + xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I64, 2); + xfer += oprot->writeI64(this->errorCode); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -22115,19 +22133,26 @@ uint32_t AbortTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) c void swap(AbortTxnsRequest &a, AbortTxnsRequest &b) { using ::std::swap; swap(a.txn_ids, b.txn_ids); + swap(a.errorCode, b.errorCode); + swap(a.__isset, b.__isset); } AbortTxnsRequest::AbortTxnsRequest(const AbortTxnsRequest& other826) { txn_ids = other826.txn_ids; + errorCode = other826.errorCode; + __isset = other826.__isset; } AbortTxnsRequest& AbortTxnsRequest::operator=(const AbortTxnsRequest& other827) { txn_ids = other827.txn_ids; + errorCode = other827.errorCode; + __isset = other827.__isset; return *this; } void AbortTxnsRequest::printTo(std::ostream& out) const { using ::apache::thrift::to_string; out << "AbortTxnsRequest("; out << "txn_ids=" << to_string(txn_ids); + out << ", " << "errorCode="; (__isset.errorCode ? 
(out << to_string(errorCode)) : (out << "")); out << ")"; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h index bac4f87320d8..5206891c1974 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -8856,24 +8856,38 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b); std::ostream& operator<<(std::ostream& out, const AbortTxnRequest& obj); +typedef struct _AbortTxnsRequest__isset { + _AbortTxnsRequest__isset() : errorCode(false) {} + bool errorCode :1; +} _AbortTxnsRequest__isset; class AbortTxnsRequest : public virtual ::apache::thrift::TBase { public: AbortTxnsRequest(const AbortTxnsRequest&); AbortTxnsRequest& operator=(const AbortTxnsRequest&); - AbortTxnsRequest() noexcept { + AbortTxnsRequest() noexcept + : errorCode(0) { } virtual ~AbortTxnsRequest() noexcept; std::vector txn_ids; + int64_t errorCode; + + _AbortTxnsRequest__isset __isset; void __set_txn_ids(const std::vector & val); + void __set_errorCode(const int64_t val); + bool operator == (const AbortTxnsRequest & rhs) const { if (!(txn_ids == rhs.txn_ids)) return false; + if (__isset.errorCode != rhs.__isset.errorCode) + return false; + else if (__isset.errorCode && !(errorCode == rhs.errorCode)) + return false; return true; } bool operator != (const AbortTxnsRequest &rhs) const { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java index 09e9808a0ed0..249ea633cba3 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java @@ -12,15 +12,18 @@ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnsRequest"); private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I64, (short)2); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AbortTxnsRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AbortTxnsRequestTupleSchemeFactory(); private @org.apache.thrift.annotation.Nullable java.util.List txn_ids; // required + private long errorCode; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - TXN_IDS((short)1, "txn_ids"); + TXN_IDS((short)1, "txn_ids"), + ERROR_CODE((short)2, "errorCode"); private static final java.util.Map byName = new java.util.HashMap(); @@ -38,6 +41,8 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 1: // TXN_IDS return TXN_IDS; + case 2: // ERROR_CODE + return ERROR_CODE; default: return null; } @@ -79,12 +84,17 @@ public java.lang.String getFieldName() { } // isset id assignments + private static final int __ERRORCODE_ISSET_ID = 0; + private byte __isset_bitfield = 0; + private static final _Fields optionals[] = {_Fields.ERROR_CODE}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txn_ids", org.apache.thrift.TFieldRequirementType.REQUIRED, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)))); + tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnsRequest.class, metaDataMap); } @@ -103,10 +113,12 @@ public AbortTxnsRequest( * Performs a deep copy on other. */ public AbortTxnsRequest(AbortTxnsRequest other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetTxn_ids()) { java.util.List __this__txn_ids = new java.util.ArrayList(other.txn_ids); this.txn_ids = __this__txn_ids; } + this.errorCode = other.errorCode; } public AbortTxnsRequest deepCopy() { @@ -116,6 +128,8 @@ public AbortTxnsRequest deepCopy() { @Override public void clear() { this.txn_ids = null; + setErrorCodeIsSet(false); + this.errorCode = 0; } public int getTxn_idsSize() { @@ -158,6 +172,28 @@ public void setTxn_idsIsSet(boolean value) { } } + public long getErrorCode() { + return this.errorCode; + } + + public void setErrorCode(long errorCode) { + this.errorCode = errorCode; + setErrorCodeIsSet(true); + } + + public void unsetErrorCode() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ + public boolean isSetErrorCode() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + public void setErrorCodeIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case TXN_IDS: @@ -168,6 +204,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case ERROR_CODE: + if (value == null) { + unsetErrorCode(); + } else { + setErrorCode((java.lang.Long)value); + } + break; + } } @@ -177,6 +221,9 @@ public java.lang.Object getFieldValue(_Fields field) { case TXN_IDS: return getTxn_ids(); + case ERROR_CODE: + return getErrorCode(); + } throw 
new java.lang.IllegalStateException(); } @@ -190,6 +237,8 @@ public boolean isSet(_Fields field) { switch (field) { case TXN_IDS: return isSetTxn_ids(); + case ERROR_CODE: + return isSetErrorCode(); } throw new java.lang.IllegalStateException(); } @@ -216,6 +265,15 @@ public boolean equals(AbortTxnsRequest that) { return false; } + boolean this_present_errorCode = true && this.isSetErrorCode(); + boolean that_present_errorCode = true && that.isSetErrorCode(); + if (this_present_errorCode || that_present_errorCode) { + if (!(this_present_errorCode && that_present_errorCode)) + return false; + if (this.errorCode != that.errorCode) + return false; + } + return true; } @@ -227,6 +285,10 @@ public int hashCode() { if (isSetTxn_ids()) hashCode = hashCode * 8191 + txn_ids.hashCode(); + hashCode = hashCode * 8191 + ((isSetErrorCode()) ? 131071 : 524287); + if (isSetErrorCode()) + hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(errorCode); + return hashCode; } @@ -248,6 +310,16 @@ public int compareTo(AbortTxnsRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetErrorCode(), other.isSetErrorCode()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetErrorCode()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -276,6 +348,12 @@ public java.lang.String toString() { sb.append(this.txn_ids); } first = false; + if (isSetErrorCode()) { + if (!first) sb.append(", "); + sb.append("errorCode:"); + sb.append(this.errorCode); + first = false; + } sb.append(")"); return sb.toString(); } @@ -299,6 +377,8 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, java.lang.ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); @@ -341,6 +421,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 2: // ERROR_CODE + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.errorCode = iprot.readI64(); + struct.setErrorCodeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -366,6 +454,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnsRequest s } oprot.writeFieldEnd(); } + if (struct.isSetErrorCode()) { + oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); + oprot.writeI64(struct.errorCode); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -390,6 +483,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest st oprot.writeI64(_iter746); } } + java.util.BitSet optionals = new java.util.BitSet(); + if (struct.isSetErrorCode()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetErrorCode()) { + oprot.writeI64(struct.errorCode); + } } @Override @@ -406,6 +507,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest str } } struct.setTxn_idsIsSet(true); + java.util.BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.errorCode = iprot.readI64(); + struct.setErrorCodeIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php index 069473243212..ca1519f36afa 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnsRequest.php @@ -30,12 +30,21 @@ class AbortTxnsRequest 'type' => TType::I64, ), ), + 2 => array( + 'var' => 'errorCode', + 'isRequired' => false, + 'type' => TType::I64, + ), ); /** * @var int[] */ public $txn_ids = null; + /** + * @var int + */ + public $errorCode = null; public function __construct($vals = null) { @@ -43,6 +52,9 @@ public function __construct($vals = null) if (isset($vals['txn_ids'])) { $this->txn_ids = $vals['txn_ids']; } + if (isset($vals['errorCode'])) { + $this->errorCode = $vals['errorCode']; + } } } @@ -81,6 +93,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 2: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->errorCode); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -107,6 +126,11 @@ public function write($output) $output->writeListEnd(); $xfer += $output->writeFieldEnd(); } + if ($this->errorCode !== null) { + $xfer += $output->writeFieldBegin('errorCode', TType::I64, 2); + $xfer += $output->writeI64($this->errorCode); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 
5f0c58a0ed71..fd67b0598f41 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -12599,12 +12599,14 @@ class AbortTxnsRequest(object): """ Attributes: - txn_ids + - errorCode """ - def __init__(self, txn_ids=None,): + def __init__(self, txn_ids=None, errorCode=None,): self.txn_ids = txn_ids + self.errorCode = errorCode def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -12625,6 +12627,11 @@ def read(self, iprot): iprot.readListEnd() else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I64: + self.errorCode = iprot.readI64() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12642,6 +12649,10 @@ def write(self, oprot): oprot.writeI64(iter659) oprot.writeListEnd() oprot.writeFieldEnd() + if self.errorCode is not None: + oprot.writeFieldBegin('errorCode', TType.I64, 2) + oprot.writeI64(self.errorCode) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31016,6 +31027,7 @@ def __ne__(self, other): AbortTxnsRequest.thrift_spec = ( None, # 0 (1, TType.LIST, 'txn_ids', (TType.I64, None, False), None, ), # 1 + (2, TType.I64, 'errorCode', None, None, ), # 2 ) all_structs.append(CommitTxnKeyValue) CommitTxnKeyValue.thrift_spec = ( diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index c30dd8203f56..c549152fe02b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3768,9 +3768,11 @@ def validate class AbortTxnsRequest include ::Thrift::Struct, ::Thrift::Struct_Union TXN_IDS = 1 + ERRORCODE = 2 FIELDS = { - TXN_IDS => {:type => ::Thrift::Types::LIST, :name => 'txn_ids', :element => {:type => ::Thrift::Types::I64}} + TXN_IDS => {:type => ::Thrift::Types::LIST, :name => 'txn_ids', :element => {:type => ::Thrift::Types::I64}}, + ERRORCODE => {:type => ::Thrift::Types::I64, :name => 'errorCode', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 43ca7619fa1f..a25e6c3306ab 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -4048,6 +4048,13 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { client.abort_txns(new AbortTxnsRequest(txnids)); } + @Override + public void abortTxns(List txnids, long errorCode) throws NoSuchTxnException, TException { + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); + abortTxnsRequest.setErrorCode(errorCode); + client.abort_txns(abortTxnsRequest); + } + @Override public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws TException { diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java 
b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 5d31161db3b6..9888e2e8d15f 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -3227,6 +3227,15 @@ void commitTxn(CommitTxnRequest rqst) */ void abortTxns(List txnids) throws TException; + /** + * Abort a list of transactions with additional information of + * errorcodes as defined in TxnErrorMsg.java. + * @param txnids List of transaction ids + * @param errorCode Error code specifying the reason for abort. + * @throws TException + */ + void abortTxns(List txnids, long errorCode) throws TException; + /** * Allocate a per table write ID and associate it with the given transaction. * @param txnId id of transaction to which the allocated write ID to be associated. diff --git a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto index 56cf6c9109bd..b7da8520b96a 100644 --- a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto +++ b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto @@ -1973,6 +1973,7 @@ message AbortTxnResponse { message AbortTxnsRequest { repeated int64 txn_ids = 1; + int64 errorCode = 2; } message AbortTxnsResponse { diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index 78bf0b7466af..ccd9e32880ee 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -1080,6 +1080,7 @@ struct AbortTxnRequest { struct AbortTxnsRequest { 1: required list txn_ids, + 2: optional i64 errorCode, } struct CommitTxnKeyValue { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index ea4f57a4d2bf..6fb5d1b64cbf 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; @@ -302,7 +303,7 @@ private boolean closeTxn(String qualifiedTableName, boolean success, long txnId) } else { try { LOG.info("txnId: {} failed. 
Aborting..", txnId); - getMsc().abortTxns(Lists.newArrayList(txnId)); + getMsc().abortTxns(Lists.newArrayList(txnId), TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode()); } catch (Exception e) { LOG.error("Error while aborting txnId: {} for table: {}", txnId, qualifiedTableName, e); ret = false; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java new file mode 100644 index 000000000000..0e324a336636 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.txn; + +/** + * The following class represents all the error messages that are handled for aborts. + */ +public enum TxnErrorMsg { + // 50000 to 59999: Txn Errors Codes. + // Query runtime aborts + NONE(50000, "None"), + ABORT_QUERY(50001, " Txn aborted by Abort Query Command"), + ABORT_CONCURRENT(50002, " Txn aborted due to concurrent committed transaction"), + ABORT_WRITE_CONFLICT(50003, " Txn aborted due to write conflicts"), + ABORT_TIMEOUT(50004, " Txn aborted due to heartbeat time-out"), + ABORT_ROLLBACK(50005, "Txn aborted due to rollback"), + ABORT_COMPACTION_TXN(50006, "Compaction txn is aborted"), + ABORT_MSCK_TXN(50007, "Msck txn is aborted"), + ABORT_MIGRATION_TXN(50008, "Managed Migration transaction is aborted"), + + // Replication related aborts + ABORT_DEFAULT_REPL_TXN(50009, " Replication:" + + "Abort default replication transaction"), + ABORT_REPLAYED_REPL_TXN(50010, " Replication:" + + "Abort replayed replication transaction"), + ABORT_REPL_WRITEID_TXN(50011, " Replication:" + + "Abort all the allocated txns so that the mapped write ids are referred as aborted ones."), + ABORT_FETCH_FAILOVER_METADATA(50012, " Replication:" + + "Abort all transactions while trying to fetch failover metadata."), + ABORT_WRITE_TXN_AFTER_TIMEOUT(50013, " Replication:" + + "Abort only write transactions for the db under replication"), + ABORT_ONGOING_TXN_FOR_TARGET_DB(50014, " Replication:" + + "Abort the ongoing transactions(opened prior to failover) for the target database."); + + private final long errorCode; + private final String txnErrorMsg; + + TxnErrorMsg(int errorCode, String txnErrorMsg) { + this.errorCode = errorCode; + this.txnErrorMsg = txnErrorMsg; + } + + public long getErrorCode() { + return errorCode; + } + + public String getTxnErrorMsg() { + return txnErrorMsg; + } + + @Override + public String toString() { + return "TxnLog: TxnErrorMsg{" + + "errorCode=" + errorCode + + ", txnErrorMsg=" + txnErrorMsg +"}"; + } + + public static 
TxnErrorMsg getErrorMsg(long errorCode) { + for (TxnErrorMsg errorMsg : values()) { + if (errorMsg.getErrorCode() == errorCode) { + return errorMsg; + } + } + return null; + } +} + diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index 77863e068ee5..c7d54c73dc14 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -1012,6 +1012,7 @@ private void deleteReplTxnMapEntry(Connection dbConn, long sourceTxnId, String r @RetrySemantics.Idempotent public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException { long txnid = rqst.getTxnid(); + TxnErrorMsg errorMsg; long sourceTxnId = -1; boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type()); boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type()); @@ -1053,7 +1054,15 @@ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaExcept } raiseTxnUnexpectedState(status, txnid); } - abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn); + + if (isReplayedReplTxn) { + errorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN; + } else if (isHiveReplTxn) { + errorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN; + } else { + errorMsg = TxnErrorMsg.ABORT_ROLLBACK; + } + abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, errorMsg); if (isReplayedReplTxn) { deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy()); @@ -1087,6 +1096,10 @@ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaExcept @RetrySemantics.Idempotent public void abortTxns(AbortTxnsRequest rqst) throws MetaException { List txnIds = rqst.getTxn_ids(); + TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE; + if (rqst.isSetErrorCode()) { + txnErrorMsg = TxnErrorMsg.getErrorMsg(rqst.getErrorCode()); + } try { Connection dbConn = null; Statement stmt = null; @@ -1113,7 +1126,7 @@ public void abortTxns(AbortTxnsRequest rqst) throws MetaException { } } } - int numAborted = abortTxns(dbConn, txnIds, false, false); + int numAborted = abortTxns(dbConn, txnIds, false, false, txnErrorMsg); if (numAborted != txnIds.size()) { LOG.warn( "Abort Transactions command only aborted {} out of {} transactions. It's possible that the other" @@ -1514,7 +1527,8 @@ public void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbort dbConn.rollback(undoWriteSetForCurrentTxn); LOG.info(msg); //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this - if (abortTxns(dbConn, Collections.singletonList(txnid), false, isReplayedReplTxn) != 1) { + if (abortTxns(dbConn, Collections.singletonList(txnid), false, isReplayedReplTxn, + TxnErrorMsg.ABORT_WRITE_CONFLICT) != 1) { throw new IllegalStateException(msg + " FAILED!"); } dbConn.commit(); @@ -1878,7 +1892,7 @@ public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaEx } // Abort all the allocated txns so that the mapped write ids are referred as aborted ones. 
- int numAborts = abortTxns(dbConn, txnIds, false, false); + int numAborts = abortTxns(dbConn, txnIds, false, false, TxnErrorMsg.ABORT_REPL_WRITEID_TXN); assert(numAborts == numAbortedWrites); } @@ -5076,8 +5090,8 @@ private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING} private static Map>> jumpTable; private int abortTxns(Connection dbConn, List txnids, - boolean skipCount, boolean isReplReplayed) throws SQLException, MetaException { - return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed); + boolean skipCount, boolean isReplReplayed, TxnErrorMsg errorMsg) throws SQLException, MetaException { + return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed, errorMsg); } /** * TODO: expose this as an operation to client. Useful for streaming API to abort all remaining @@ -5094,12 +5108,13 @@ private int abortTxns(Connection dbConn, List txnids, * @throws SQLException */ private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbeat, - boolean skipCount, boolean isReplReplayed) + boolean skipCount, boolean isReplReplayed, TxnErrorMsg errorMsg) throws SQLException, MetaException { Statement stmt = null; if (txnids.isEmpty()) { return 0; } + LOG.info("Trying to abort txns due to : {}", errorMsg); removeTxnsFromMinHistoryLevel(dbConn, txnids); try { stmt = dbConn.createStatement(); @@ -5150,6 +5165,7 @@ private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbe if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size()); } + LOG.info("Txns are aborted successfully due to : {}", errorMsg); return numAborted; } finally { closeStmt(stmt); @@ -5253,7 +5269,8 @@ private LockResponse checkLock(Connection dbConn, long extLockId, long txnId, bo " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4)) + "," + rs.getLong(5) + "] has already updated resource '" + resourceName + "'"; LOG.info(msg); - if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), false, false) != 1) { + if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), false, false, + TxnErrorMsg.ABORT_CONCURRENT) != 1) { throw new IllegalStateException(msg + " FAILED!"); } dbConn.commit(); @@ -5771,7 +5788,7 @@ public void performTimeOuts() { close(rs, stmt, null); int numTxnsAborted = 0; for(List batchToAbort : timedOutTxns) { - if (abortTxns(dbConn, batchToAbort, true, false, false) == batchToAbort.size()) { + if (abortTxns(dbConn, batchToAbort, true, false, false, TxnErrorMsg.ABORT_TIMEOUT) == batchToAbort.size()) { dbConn.commit(); numTxnsAborted += batchToAbort.size(); //todo: add TXNS.COMMENT filed and set it to 'aborted by system due to timeout' diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index dc73c81c16e4..f5655b0c6fe3 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -2516,6 +2516,13 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { client.abort_txns(new AbortTxnsRequest(txnids)); } + @Override + public void abortTxns(List txnids, long errorCode) throws 
NoSuchTxnException, TException { + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); + abortTxnsRequest.setErrorCode(errorCode); + client.abort_txns(abortTxnsRequest); + } + @Override public void replTableWriteIdState(String validWriteIdList, String dbName, String tableName, List partNames) throws TException { From 76404e9a0c6d5dd52b44e1d5f7931da225cc2638 Mon Sep 17 00:00:00 2001 From: Sourabh Badhya Date: Wed, 4 Jan 2023 17:44:47 +0530 Subject: [PATCH 2/5] Address review comments - 1 --- .../hive/metastore/txn/TxnErrorMsg.java | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java index 0e324a336636..6b589d1a2364 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java @@ -22,8 +22,8 @@ * The following class represents all the error messages that are handled for aborts. */ public enum TxnErrorMsg { - // 50000 to 59999: Txn Errors Codes. - // Query runtime aborts + // Txn Errors Codes: 50000 - 59999. + // Query runtime aborts - 50000-50999 NONE(50000, "None"), ABORT_QUERY(50001, " Txn aborted by Abort Query Command"), ABORT_CONCURRENT(50002, " Txn aborted due to concurrent committed transaction"), @@ -34,18 +34,18 @@ public enum TxnErrorMsg { ABORT_MSCK_TXN(50007, "Msck txn is aborted"), ABORT_MIGRATION_TXN(50008, "Managed Migration transaction is aborted"), - // Replication related aborts - ABORT_DEFAULT_REPL_TXN(50009, " Replication:" + + // Replication related aborts - 51000 - 51099 + ABORT_DEFAULT_REPL_TXN(51000, " Replication:" + "Abort default replication transaction"), - ABORT_REPLAYED_REPL_TXN(50010, " Replication:" + + ABORT_REPLAYED_REPL_TXN(51001, " Replication:" + "Abort replayed replication transaction"), - ABORT_REPL_WRITEID_TXN(50011, " Replication:" + + ABORT_REPL_WRITEID_TXN(51002, " Replication:" + "Abort all the allocated txns so that the mapped write ids are referred as aborted ones."), - ABORT_FETCH_FAILOVER_METADATA(50012, " Replication:" + + ABORT_FETCH_FAILOVER_METADATA(51003, " Replication:" + "Abort all transactions while trying to fetch failover metadata."), - ABORT_WRITE_TXN_AFTER_TIMEOUT(50013, " Replication:" + + ABORT_WRITE_TXN_AFTER_TIMEOUT(51004, " Replication:" + "Abort only write transactions for the db under replication"), - ABORT_ONGOING_TXN_FOR_TARGET_DB(50014, " Replication:" + + ABORT_ONGOING_TXN_FOR_TARGET_DB(51005, " Replication:" + "Abort the ongoing transactions(opened prior to failover) for the target database."); private final long errorCode; @@ -60,10 +60,6 @@ public long getErrorCode() { return errorCode; } - public String getTxnErrorMsg() { - return txnErrorMsg; - } - @Override public String toString() { return "TxnLog: TxnErrorMsg{" + From c76271aba5561f1428663eae33cad5cd14b82f66 Mon Sep 17 00:00:00 2001 From: Sourabh Badhya Date: Mon, 9 Jan 2023 08:24:48 +0530 Subject: [PATCH 3/5] Address review comments - 2 --- .../SynchronizedMetaStoreClient.java | 23 +--- .../hadoop/hive/ql/lockmgr/DbTxnManager.java | 18 +-- .../apache/hadoop/hive/ql/metadata/Hive.java | 80 +------------ .../hadoop/hive/ql/txn/compactor/Worker.java | 17 +-- .../ql/util/HiveStrictManagedMigration.java | 15 ++- 
.../thrift/gen-cpp/hive_metastore_types.cpp | 22 ++++ .../gen/thrift/gen-cpp/hive_metastore_types.h | 13 ++- .../hive/metastore/api/AbortTxnRequest.java | 106 +++++++++++++++++- .../gen-php/metastore/AbortTxnRequest.php | 24 ++++ .../thrift/gen-py/hive_metastore/ttypes.py | 14 ++- .../gen/thrift/gen-rb/hive_metastore_types.rb | 4 +- .../hive/metastore/HiveMetaStoreClient.java | 9 +- .../hive/metastore/IMetaStoreClient.java | 17 ++- .../hive/metastore/hive_metastore.proto | 1 + .../src/main/thrift/hive_metastore.thrift | 1 + .../apache/hadoop/hive/metastore/Msck.java | 13 +-- .../hadoop/hive/metastore/txn/TxnHandler.java | 12 +- .../HiveMetaStoreClientPreCatalog.java | 9 +- .../hive/streaming/TransactionBatch.java | 19 ++-- 19 files changed, 246 insertions(+), 171 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java index d57f7c388c8d..1e2737d305c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java @@ -22,23 +22,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; -import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventResponse; -import org.apache.hadoop.hive.metastore.api.InvalidObjectException; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.UnknownTableException; -import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; -import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.thrift.TException; @@ -62,7 +47,9 @@ public synchronized void commitTxn(long txnid) throws TException { } public synchronized void rollbackTxn(long txnid) throws TException { - client.rollbackTxn(txnid); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnid); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()); + client.rollbackTxn(abortTxnRequest); } public synchronized void heartbeat(long txnid, long lockid) throws TException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index de8b3a45df43..c77a055a1a11 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -29,19 +29,9 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.LockComponentBuilder; import 
org.apache.hadoop.hive.metastore.LockRequestBuilder; -import org.apache.hadoop.hive.metastore.api.LockComponent; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchLockException; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnToWriteId; -import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; -import org.apache.hadoop.hive.metastore.api.TxnType; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryPlan; @@ -593,7 +583,9 @@ public void rollbackTxn() throws LockException { if (replPolicy != null) { getMS().replRollbackTxn(txnId, replPolicy, TxnType.DEFAULT); } else { - getMS().rollbackTxn(txnId); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()); + getMS().rollbackTxn(abortTxnRequest); } } catch (NoSuchTxnException e) { LOG.error("Metastore could not find " + JavaUtils.txnIdToString(txnId)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 704e83b1d943..834ab2d56073 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -105,11 +105,7 @@ import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.api.CompactionRequest; -import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest; -import org.apache.hadoop.hive.metastore.api.GetTableRequest; -import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.ql.io.HdfsUtils; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.HiveMetaHook; @@ -122,79 +118,9 @@ import org.apache.hadoop.hive.metastore.SynchronizedMetaStoreClient; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.AggrStats; -import org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; -import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.DataConnector; -import 
org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.DropDatabaseRequest; -import org.apache.hadoop.hive.metastore.api.EnvironmentContext; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.FireEventRequest; -import org.apache.hadoop.hive.metastore.api.FireEventRequestData; -import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; -import org.apache.hadoop.hive.metastore.api.Function; -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; -import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest; -import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse; -import org.apache.hadoop.hive.metastore.api.GetPartitionRequest; -import org.apache.hadoop.hive.metastore.api.GetPartitionResponse; -import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest; -import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; -import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; -import org.apache.hadoop.hive.metastore.api.HiveObjectRef; -import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.InsertEventRequestData; -import org.apache.hadoop.hive.metastore.api.InvalidOperationException; -import org.apache.hadoop.hive.metastore.api.Materialization; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; -import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.PartitionSpec; -import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; -import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; -import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; -import org.apache.hadoop.hive.metastore.api.PrincipalType; -import org.apache.hadoop.hive.metastore.api.PrivilegeBag; -import org.apache.hadoop.hive.metastore.api.Role; -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; -import org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints; -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; -import org.apache.hadoop.hive.metastore.api.SQLForeignKey; -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; -import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.SkewedInfo; -import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMMapping; -import org.apache.hadoop.hive.metastore.api.WMNullablePool; -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMPool; -import org.apache.hadoop.hive.metastore.api.WMResourcePlan; -import org.apache.hadoop.hive.metastore.api.WMTrigger; -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; -import 
org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; -import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.ReplChangeManager; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.database.drop.DropDatabaseDesc; import org.apache.hadoop.hive.ql.exec.AbstractFileMergeOperator; @@ -5942,7 +5868,9 @@ public GetOpenTxnsInfoResponse showTransactions() throws HiveException { public void abortTransactions(List txnids, long errorCode) throws HiveException { try { - getMSC().abortTxns(txnids, errorCode); + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); + abortTxnsRequest.setErrorCode(errorCode); + getMSC().abortTxns(abortTxnsRequest); } catch (Exception e) { LOG.error("Failed abortTransactions", e); throw new HiveException(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index c2f5aa87a349..30634bc18e5a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -27,18 +27,7 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.MetaStoreThread; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockType; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TxnType; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; @@ -660,7 +649,9 @@ private void commit() throws TException { */ private void abort() throws TException { if (status == TxnStatus.OPEN) { - msc.abortTxns(Collections.singletonList(txnId), TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Collections.singletonList(txnId)); + abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); + msc.abortTxns(abortTxnsRequest); status = TxnStatus.ABORTED; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 1eaa3c319acc..0c7d210187d8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -52,12 +52,7 @@ import org.apache.hadoop.hive.metastore.TableType; import 
org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -1451,7 +1446,9 @@ private TxnCtx generateTxnCtxForAlter( result = new TxnCtx(writeId, validWriteIds, txnId); } finally { if (result == null) { - msc.abortTxns(Lists.newArrayList(txnId), TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnId)); + abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + msc.abortTxns(abortTxnsRequest); } } return result; @@ -1467,7 +1464,9 @@ private void closeTxnCtx(TxnCtx txnCtx, IMetaStoreClient msc, boolean isOk) if (isOk) { msc.commitTxn(txnCtx.txnId); } else { - msc.abortTxns(Lists.newArrayList(txnCtx.txnId), TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnCtx.txnId)); + abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + msc.abortTxns(abortTxnsRequest); } } catch (TException ex) { throw new HiveException(ex); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index d9e511cf16f7..347ab314dd48 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -21896,6 +21896,11 @@ void AbortTxnRequest::__set_txn_type(const TxnType::type val) { this->txn_type = val; __isset.txn_type = true; } + +void AbortTxnRequest::__set_errorCode(const int64_t val) { + this->errorCode = val; +__isset.errorCode = true; +} std::ostream& operator<<(std::ostream& out, const AbortTxnRequest& obj) { obj.printTo(out); @@ -21951,6 +21956,14 @@ uint32_t AbortTxnRequest::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I64) { + xfer += iprot->readI64(this->errorCode); + this->__isset.errorCode = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -21984,6 +21997,11 @@ uint32_t AbortTxnRequest::write(::apache::thrift::protocol::TProtocol* oprot) co xfer += oprot->writeI32(static_cast(this->txn_type)); xfer += oprot->writeFieldEnd(); } + if (this->__isset.errorCode) { + xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I64, 4); + xfer += oprot->writeI64(this->errorCode); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -21994,6 +22012,7 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); swap(a.replPolicy, b.replPolicy); swap(a.txn_type, b.txn_type); + swap(a.errorCode, b.errorCode); swap(a.__isset, b.__isset); } @@ -22001,12 +22020,14 @@ 
AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other818) { txnid = other818.txnid; replPolicy = other818.replPolicy; txn_type = other818.txn_type; + errorCode = other818.errorCode; __isset = other818.__isset; } AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other819) { txnid = other819.txnid; replPolicy = other819.replPolicy; txn_type = other819.txn_type; + errorCode = other819.errorCode; __isset = other819.__isset; return *this; } @@ -22016,6 +22037,7 @@ void AbortTxnRequest::printTo(std::ostream& out) const { out << "txnid=" << to_string(txnid); out << ", " << "replPolicy="; (__isset.replPolicy ? (out << to_string(replPolicy)) : (out << "")); out << ", " << "txn_type="; (__isset.txn_type ? (out << to_string(txn_type)) : (out << "")); + out << ", " << "errorCode="; (__isset.errorCode ? (out << to_string(errorCode)) : (out << "")); out << ")"; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h index 5206891c1974..e52a356e36c2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -8793,9 +8793,10 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b); std::ostream& operator<<(std::ostream& out, const OpenTxnsResponse& obj); typedef struct _AbortTxnRequest__isset { - _AbortTxnRequest__isset() : replPolicy(false), txn_type(false) {} + _AbortTxnRequest__isset() : replPolicy(false), txn_type(false), errorCode(false) {} bool replPolicy :1; bool txn_type :1; + bool errorCode :1; } _AbortTxnRequest__isset; class AbortTxnRequest : public virtual ::apache::thrift::TBase { @@ -8806,7 +8807,8 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase { AbortTxnRequest() noexcept : txnid(0), replPolicy(), - txn_type(static_cast(0)) { + txn_type(static_cast(0)), + errorCode(0) { } virtual ~AbortTxnRequest() noexcept; @@ -8817,6 +8819,7 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase { * @see TxnType */ TxnType::type txn_type; + int64_t errorCode; _AbortTxnRequest__isset __isset; @@ -8826,6 +8829,8 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase { void __set_txn_type(const TxnType::type val); + void __set_errorCode(const int64_t val); + bool operator == (const AbortTxnRequest & rhs) const { if (!(txnid == rhs.txnid)) @@ -8838,6 +8843,10 @@ class AbortTxnRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.txn_type && !(txn_type == rhs.txn_type)) return false; + if (__isset.errorCode != rhs.__isset.errorCode) + return false; + else if (__isset.errorCode && !(errorCode == rhs.errorCode)) + return false; return true; } bool operator != (const AbortTxnRequest &rhs) const { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java index 37311fc02a62..a45233e0281c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java @@ -14,6 +14,7 @@ private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new 
org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1); private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)2); private static final org.apache.thrift.protocol.TField TXN_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_type", org.apache.thrift.protocol.TType.I32, (short)3); + private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I64, (short)4); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new AbortTxnRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new AbortTxnRequestTupleSchemeFactory(); @@ -21,6 +22,7 @@ private long txnid; // required private @org.apache.thrift.annotation.Nullable java.lang.String replPolicy; // optional private @org.apache.thrift.annotation.Nullable TxnType txn_type; // optional + private long errorCode; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -30,7 +32,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { * * @see TxnType */ - TXN_TYPE((short)3, "txn_type"); + TXN_TYPE((short)3, "txn_type"), + ERROR_CODE((short)4, "errorCode"); private static final java.util.Map byName = new java.util.HashMap(); @@ -52,6 +55,8 @@ public static _Fields findByThriftId(int fieldId) { return REPL_POLICY; case 3: // TXN_TYPE return TXN_TYPE; + case 4: // ERROR_CODE + return ERROR_CODE; default: return null; } @@ -94,8 +99,9 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __TXNID_ISSET_ID = 0; + private static final int __ERRORCODE_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.TXN_TYPE}; + private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.TXN_TYPE,_Fields.ERROR_CODE}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -105,6 +111,8 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TXN_TYPE, new org.apache.thrift.meta_data.FieldMetaData("txn_type", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, TxnType.class))); + tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnRequest.class, metaDataMap); } @@ -132,6 +140,7 @@ public AbortTxnRequest(AbortTxnRequest other) { if (other.isSetTxn_type()) { this.txn_type = other.txn_type; } + this.errorCode = other.errorCode; } public AbortTxnRequest deepCopy() { @@ -144,6 +153,8 @@ public void clear() { this.txnid = 0; this.replPolicy = null; 
this.txn_type = null; + setErrorCodeIsSet(false); + this.errorCode = 0; } public long getTxnid() { @@ -224,6 +235,28 @@ public void setTxn_typeIsSet(boolean value) { } } + public long getErrorCode() { + return this.errorCode; + } + + public void setErrorCode(long errorCode) { + this.errorCode = errorCode; + setErrorCodeIsSet(true); + } + + public void unsetErrorCode() { + __isset_bitfield = org.apache.thrift.EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ + public boolean isSetErrorCode() { + return org.apache.thrift.EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + public void setErrorCodeIsSet(boolean value) { + __isset_bitfield = org.apache.thrift.EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case TXNID: @@ -250,6 +283,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case ERROR_CODE: + if (value == null) { + unsetErrorCode(); + } else { + setErrorCode((java.lang.Long)value); + } + break; + } } @@ -265,6 +306,9 @@ public java.lang.Object getFieldValue(_Fields field) { case TXN_TYPE: return getTxn_type(); + case ERROR_CODE: + return getErrorCode(); + } throw new java.lang.IllegalStateException(); } @@ -282,6 +326,8 @@ public boolean isSet(_Fields field) { return isSetReplPolicy(); case TXN_TYPE: return isSetTxn_type(); + case ERROR_CODE: + return isSetErrorCode(); } throw new java.lang.IllegalStateException(); } @@ -326,6 +372,15 @@ public boolean equals(AbortTxnRequest that) { return false; } + boolean this_present_errorCode = true && this.isSetErrorCode(); + boolean that_present_errorCode = true && that.isSetErrorCode(); + if (this_present_errorCode || that_present_errorCode) { + if (!(this_present_errorCode && that_present_errorCode)) + return false; + if (this.errorCode != that.errorCode) + return false; + } + return true; } @@ -343,6 +398,10 @@ public int hashCode() { if (isSetTxn_type()) hashCode = hashCode * 8191 + txn_type.getValue(); + hashCode = hashCode * 8191 + ((isSetErrorCode()) ? 
131071 : 524287); + if (isSetErrorCode()) + hashCode = hashCode * 8191 + org.apache.thrift.TBaseHelper.hashCode(errorCode); + return hashCode; } @@ -384,6 +443,16 @@ public int compareTo(AbortTxnRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetErrorCode(), other.isSetErrorCode()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetErrorCode()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -428,6 +497,12 @@ public java.lang.String toString() { } first = false; } + if (isSetErrorCode()) { + if (!first) sb.append(", "); + sb.append("errorCode:"); + sb.append(this.errorCode); + first = false; + } sb.append(")"); return sb.toString(); } @@ -501,6 +576,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnRequest str org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 4: // ERROR_CODE + if (schemeField.type == org.apache.thrift.protocol.TType.I64) { + struct.errorCode = iprot.readI64(); + struct.setErrorCodeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -531,6 +614,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnRequest st oprot.writeFieldEnd(); } } + if (struct.isSetErrorCode()) { + oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); + oprot.writeI64(struct.errorCode); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -556,13 +644,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnRequest str if (struct.isSetTxn_type()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetErrorCode()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetReplPolicy()) { oprot.writeString(struct.replPolicy); } if (struct.isSetTxn_type()) { oprot.writeI32(struct.txn_type.getValue()); } + if (struct.isSetErrorCode()) { + oprot.writeI64(struct.errorCode); + } } @Override @@ -570,7 +664,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnRequest stru org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; struct.txnid = iprot.readI64(); struct.setTxnidIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.replPolicy = iprot.readString(); struct.setReplPolicyIsSet(true); @@ -579,6 +673,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnRequest stru struct.txn_type = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32()); struct.setTxn_typeIsSet(true); } + if (incoming.get(2)) { + struct.errorCode = iprot.readI64(); + struct.setErrorCodeIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php index 589d5d4a5da0..399aeaf626ce 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/AbortTxnRequest.php @@ -37,6 +37,11 @@ class AbortTxnRequest 'type' => TType::I32, 'class' => '\metastore\TxnType', ), + 4 => array( + 'var' => 'errorCode', + 
'isRequired' => false, + 'type' => TType::I64, + ), ); /** @@ -51,6 +56,10 @@ class AbortTxnRequest * @var int */ public $txn_type = null; + /** + * @var int + */ + public $errorCode = null; public function __construct($vals = null) { @@ -64,6 +73,9 @@ public function __construct($vals = null) if (isset($vals['txn_type'])) { $this->txn_type = $vals['txn_type']; } + if (isset($vals['errorCode'])) { + $this->errorCode = $vals['errorCode']; + } } } @@ -107,6 +119,13 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 4: + if ($ftype == TType::I64) { + $xfer += $input->readI64($this->errorCode); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -136,6 +155,11 @@ public function write($output) $xfer += $output->writeI32($this->txn_type); $xfer += $output->writeFieldEnd(); } + if ($this->errorCode !== null) { + $xfer += $output->writeFieldBegin('errorCode', TType::I64, 4); + $xfer += $output->writeI64($this->errorCode); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index fd67b0598f41..abfef9c9d34b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -12520,14 +12520,16 @@ class AbortTxnRequest(object): - txnid - replPolicy - txn_type + - errorCode """ - def __init__(self, txnid=None, replPolicy=None, txn_type=None,): + def __init__(self, txnid=None, replPolicy=None, txn_type=None, errorCode=None,): self.txnid = txnid self.replPolicy = replPolicy self.txn_type = txn_type + self.errorCode = errorCode def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -12553,6 +12555,11 @@ def read(self, iprot): self.txn_type = iprot.readI32() else: iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I64: + self.errorCode = iprot.readI64() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -12575,6 +12582,10 @@ def write(self, oprot): oprot.writeFieldBegin('txn_type', TType.I32, 3) oprot.writeI32(self.txn_type) oprot.writeFieldEnd() + if self.errorCode is not None: + oprot.writeFieldBegin('errorCode', TType.I64, 4) + oprot.writeI64(self.errorCode) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -31022,6 +31033,7 @@ def __ne__(self, other): (1, TType.I64, 'txnid', None, None, ), # 1 (2, TType.STRING, 'replPolicy', 'UTF8', None, ), # 2 (3, TType.I32, 'txn_type', None, None, ), # 3 + (4, TType.I64, 'errorCode', None, None, ), # 4 ) all_structs.append(AbortTxnsRequest) AbortTxnsRequest.thrift_spec = ( diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index c549152fe02b..c5137c0dd13e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -3746,11 +3746,13 @@ class AbortTxnRequest TXNID = 1 REPLPOLICY = 2 TXN_TYPE = 3 + ERRORCODE = 4 FIELDS = { TXNID => {:type => ::Thrift::Types::I64, :name => 'txnid'}, REPLPOLICY => {:type => 
::Thrift::Types::STRING, :name => 'replPolicy', :optional => true}, - TXN_TYPE => {:type => ::Thrift::Types::I32, :name => 'txn_type', :optional => true, :enum_class => ::TxnType} + TXN_TYPE => {:type => ::Thrift::Types::I32, :name => 'txn_type', :optional => true, :enum_class => ::TxnType}, + ERRORCODE => {:type => ::Thrift::Types::I64, :name => 'errorCode', :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index a25e6c3306ab..00879bc31748 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -4004,6 +4004,11 @@ public void rollbackTxn(long txnid) throws NoSuchTxnException, TException { client.abort_txn(new AbortTxnRequest(txnid)); } + @Override + public void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException { + client.abort_txn(abortTxnRequest); + } + @Override public void replRollbackTxn(long srcTxnId, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException { AbortTxnRequest rqst = new AbortTxnRequest(srcTxnId); @@ -4049,9 +4054,7 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { } @Override - public void abortTxns(List txnids, long errorCode) throws NoSuchTxnException, TException { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); - abortTxnsRequest.setErrorCode(errorCode); + public void abortTxns(AbortTxnsRequest abortTxnsRequest) throws NoSuchTxnException, TException { client.abort_txns(abortTxnsRequest); } diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 9888e2e8d15f..0ada3af00742 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -3150,6 +3150,18 @@ List getValidWriteIds(List tablesList, String validT */ void rollbackTxn(long txnid) throws NoSuchTxnException, TException; + /** + * Rollback a transaction. This will also unlock any locks associated with + * this transaction. + * @param abortTxnRequest AbortTxnRequest object containing transaction id and + * error codes. + * @throws NoSuchTxnException if the requested transaction does not exist. + * Note that this can result from the transaction having timed out and been + * deleted. + * @throws TException + */ + void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException; + /** * Rollback a transaction. This will also unlock any locks associated with * this transaction. @@ -3230,11 +3242,10 @@ void commitTxn(CommitTxnRequest rqst) /** * Abort a list of transactions with additional information of * errorcodes as defined in TxnErrorMsg.java. - * @param txnids List of transaction ids - * @param errorCode Error code specifying the reason for abort. 
+ * @param abortTxnsRequest Information containing txnIds and error codes * @throws TException */ - void abortTxns(List txnids, long errorCode) throws TException; + void abortTxns(AbortTxnsRequest abortTxnsRequest) throws TException; /** * Allocate a per table write ID and associate it with the given transaction. diff --git a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto index b7da8520b96a..aa54d942396c 100644 --- a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto +++ b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto @@ -1966,6 +1966,7 @@ message AbortTxnRequest { int64 txnid = 1; string repl_policy = 2; TxnType txn_type = 3; + int64 errorCode = 4; } message AbortTxnResponse { diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index ccd9e32880ee..87595f632fe2 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -1076,6 +1076,7 @@ struct AbortTxnRequest { 1: required i64 txnid, 2: optional string replPolicy, 3: optional TxnType txn_type, + 4: optional i64 errorCode, } struct AbortTxnsRequest { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index 6fb5d1b64cbf..ca616348bacd 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -36,14 +36,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.MetastoreException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.*; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -303,7 +296,9 @@ private boolean closeTxn(String qualifiedTableName, boolean success, long txnId) } else { try { LOG.info("txnId: {} failed. 
Aborting..", txnId); - getMsc().abortTxns(Lists.newArrayList(txnId), TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode()); + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnId)); + abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode()); + getMsc().abortTxns(abortTxnsRequest); } catch (Exception e) { LOG.error("Error while aborting txnId: {} for table: {}", txnId, qualifiedTableName, e); ret = false; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index c7d54c73dc14..0cbddf434251 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -1012,7 +1012,7 @@ private void deleteReplTxnMapEntry(Connection dbConn, long sourceTxnId, String r @RetrySemantics.Idempotent public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException { long txnid = rqst.getTxnid(); - TxnErrorMsg errorMsg; + TxnErrorMsg errorMsg = TxnErrorMsg.NONE; long sourceTxnId = -1; boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type()); boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type()); @@ -1059,9 +1059,10 @@ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaExcept errorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN; } else if (isHiveReplTxn) { errorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN; - } else { + } else if (rqst.isSetErrorCode() && rqst.getErrorCode() == TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()) { errorMsg = TxnErrorMsg.ABORT_ROLLBACK; } + abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, errorMsg); if (isReplayedReplTxn) { @@ -1521,7 +1522,7 @@ public void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbort if (partitionName != null) { resource.append('/').append(partitionName); } - String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + commitId + "]" + " due to a write conflict on " + resource + + String msg = "Aborting " + " due to a write conflict on " + resource + " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8); //remove WRITE_SET info for current txn since it's about to abort dbConn.rollback(undoWriteSetForCurrentTxn); @@ -5114,7 +5115,7 @@ private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbe if (txnids.isEmpty()) { return 0; } - LOG.info("Trying to abort txns due to : {}", errorMsg); + LOG.debug("Trying to abort txns due to : {}. Aborted Transaction IDs : {}", errorMsg, txnids); removeTxnsFromMinHistoryLevel(dbConn, txnids); try { stmt = dbConn.createStatement(); @@ -5165,7 +5166,7 @@ private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbe if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size()); } - LOG.info("Txns are aborted successfully due to : {}", errorMsg); + LOG.warn("Txns are aborted successfully due to : {}. 
Aborted Transaction IDs : {}", errorMsg, txnids); return numAborted; } finally { closeStmt(stmt); @@ -5793,7 +5794,6 @@ public void performTimeOuts() { numTxnsAborted += batchToAbort.size(); //todo: add TXNS.COMMENT filed and set it to 'aborted by system due to timeout' Collections.sort(batchToAbort);//easier to read logs - LOG.info("Aborted the following transactions due to timeout: {}", batchToAbort.toString()); } else { //could not abort all txns in this batch - this may happen because in parallel with this diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index f5655b0c6fe3..9bb386c4e84e 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -2473,6 +2473,11 @@ public void rollbackTxn(long txnid) throws NoSuchTxnException, TException { client.abort_txn(new AbortTxnRequest(txnid)); } + @Override + public void rollbackTxn(AbortTxnRequest abortTxnRequest) throws NoSuchTxnException, TException { + client.abort_txn(abortTxnRequest); + } + @Override public void replRollbackTxn(long srcTxnId, String replPolicy, TxnType txnType) throws NoSuchTxnException, TException { AbortTxnRequest rqst = new AbortTxnRequest(srcTxnId); @@ -2517,9 +2522,7 @@ public void abortTxns(List txnids) throws NoSuchTxnException, TException { } @Override - public void abortTxns(List txnids, long errorCode) throws NoSuchTxnException, TException { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); - abortTxnsRequest.setErrorCode(errorCode); + public void abortTxns(AbortTxnsRequest abortTxnsRequest) throws NoSuchTxnException, TException { client.abort_txns(abortTxnsRequest); } diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java index 9215e1b80cfe..7181adaf958b 100644 --- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java +++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java @@ -24,15 +24,8 @@ import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.LockRequestBuilder; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; -import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; -import org.apache.hadoop.hive.metastore.api.TxnAbortedException; -import org.apache.hadoop.hive.metastore.api.TxnToWriteId; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; import org.apache.thrift.TException; @@ -366,7 +359,9 @@ private void abortImpl(boolean abortAllRemaining) throws StreamingException { ? 
1 : 0), 0); for (currentTxnIndex = minOpenTxnIndex; currentTxnIndex < txnToWriteIds.size(); currentTxnIndex++) { - conn.getMSC().rollbackTxn(txnToWriteIds.get(currentTxnIndex).getTxnId()); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnToWriteIds.get(currentTxnIndex).getTxnId()); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()); + conn.getMSC().rollbackTxn(abortTxnRequest); txnStatus[currentTxnIndex] = HiveStreamingConnection.TxnState.ABORTED; } currentTxnIndex--; //since the loop left it == txnToWriteIds.size() @@ -381,7 +376,9 @@ private void abortImpl(boolean abortAllRemaining) throws StreamingException { } long currTxnId = getCurrentTxnId(); if (currTxnId > 0) { - conn.getMSC().rollbackTxn(currTxnId); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(currTxnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()); + conn.getMSC().rollbackTxn(abortTxnRequest); txnStatus[currentTxnIndex] = HiveStreamingConnection.TxnState.ABORTED; } } From d7c35bcb4fbae2d6f2f0c13180bccd1327351973 Mon Sep 17 00:00:00 2001 From: Sourabh Badhya Date: Mon, 9 Jan 2023 21:31:28 +0530 Subject: [PATCH 4/5] Address review comments - 3 --- .../SynchronizedMetaStoreClient.java | 19 ++++- .../hadoop/hive/ql/lockmgr/DbTxnManager.java | 14 +++- .../apache/hadoop/hive/ql/metadata/Hive.java | 81 ++++++++++++++++++- .../hadoop/hive/ql/txn/compactor/Worker.java | 20 ++++- .../ql/util/HiveStrictManagedMigration.java | 20 +++-- .../hive/metastore/hive_metastore.proto | 4 +- .../apache/hadoop/hive/metastore/Msck.java | 18 +++-- .../hive/metastore/txn/TxnErrorMsg.java | 50 ++++++------ .../hadoop/hive/metastore/txn/TxnHandler.java | 16 ++-- .../hive/streaming/TransactionBatch.java | 11 ++- 10 files changed, 198 insertions(+), 55 deletions(-) diff --git a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java index 1e2737d305c7..b7aa6fad9e14 100644 --- a/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/metastore/SynchronizedMetaStoreClient.java @@ -22,7 +22,24 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.CmRecycleResponse; +import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FireEventRequest; +import org.apache.hadoop.hive.metastore.api.FireEventResponse; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; +import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest; import 
org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.thrift.TException; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index c77a055a1a11..8fc77b09f0cd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -29,7 +29,19 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.LockComponentBuilder; import org.apache.hadoop.hive.metastore.LockRequestBuilder; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.LockComponent; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.LockState; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnToWriteId; +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; +import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.ql.Context; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 834ab2d56073..4acbb580b3a0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -105,7 +105,82 @@ import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AllTableConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.CmRecycleRequest; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CompactionResponse; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DataConnector; +import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.DropDatabaseRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FireEventRequest; +import org.apache.hadoop.hive.metastore.api.FireEventRequestData; +import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest; +import 
org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionNamesPsResponse; +import org.apache.hadoop.hive.metastore.api.GetPartitionRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionResponse; +import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest; +import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthResponse; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GetTableRequest; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.HiveObjectType; +import org.apache.hadoop.hive.metastore.api.InsertEventRequestData; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.Materialization; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD; +import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; +import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SQLAllTableConstraints; +import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint; +import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; +import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest; +import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMMapping; +import org.apache.hadoop.hive.metastore.api.WMNullablePool; +import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMPool; +import org.apache.hadoop.hive.metastore.api.WMResourcePlan; +import org.apache.hadoop.hive.metastore.api.WMTrigger; +import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse; +import 
org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest; +import org.apache.hadoop.hive.metastore.api.WriteNotificationLogBatchRequest; import org.apache.hadoop.hive.ql.io.HdfsUtils; import org.apache.hadoop.hive.metastore.HiveMetaException; import org.apache.hadoop.hive.metastore.HiveMetaHook; @@ -5867,9 +5942,9 @@ public GetOpenTxnsInfoResponse showTransactions() throws HiveException { } public void abortTransactions(List txnids, long errorCode) throws HiveException { + AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); + abortTxnsRequest.setErrorCode(errorCode); try { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(txnids); - abortTxnsRequest.setErrorCode(errorCode); getMSC().abortTxns(abortTxnsRequest); } catch (Exception e) { LOG.error("Failed abortTransactions", e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 30634bc18e5a..1b733d9ac977 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -27,7 +27,19 @@ import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.MetaStoreThread; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockType; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.LockState; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; @@ -649,9 +661,9 @@ private void commit() throws TException { */ private void abort() throws TException { if (status == TxnStatus.OPEN) { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Collections.singletonList(txnId)); - abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); - msc.abortTxns(abortTxnsRequest); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); + msc.rollbackTxn(abortTxnRequest); status = TxnStatus.ABORTED; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 0c7d210187d8..1dd9d8bf9db9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -52,7 +52,13 @@ import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.TransactionalValidationListener; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.*; +import 
org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -1446,9 +1452,9 @@ private TxnCtx generateTxnCtxForAlter( result = new TxnCtx(writeId, validWriteIds, txnId); } finally { if (result == null) { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnId)); - abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); - msc.abortTxns(abortTxnsRequest); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + msc.rollbackTxn(abortTxnRequest); } } return result; @@ -1464,9 +1470,9 @@ private void closeTxnCtx(TxnCtx txnCtx, IMetaStoreClient msc, boolean isOk) if (isOk) { msc.commitTxn(txnCtx.txnId); } else { - AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnCtx.txnId)); - abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); - msc.abortTxns(abortTxnsRequest); + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnCtx.txnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MIGRATION_TXN.getErrorCode()); + msc.rollbackTxn(abortTxnRequest); } } catch (TException ex) { throw new HiveException(ex); diff --git a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto index aa54d942396c..8ef58bc22859 100644 --- a/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto +++ b/standalone-metastore/metastore-common/src/main/protobuf/org/apache/hadoop/hive/metastore/hive_metastore.proto @@ -1966,7 +1966,7 @@ message AbortTxnRequest { int64 txnid = 1; string repl_policy = 2; TxnType txn_type = 3; - int64 errorCode = 4; + optional int64 errorCode = 4; } message AbortTxnResponse { @@ -1974,7 +1974,7 @@ message AbortTxnResponse { message AbortTxnsRequest { repeated int64 txn_ids = 1; - int64 errorCode = 2; + optional int64 errorCode = 2; } message AbortTxnsResponse { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index ca616348bacd..e79836b86870 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -36,7 +36,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.LockState; +import 
org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.MetastoreException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -294,11 +302,11 @@ private boolean closeTxn(String qualifiedTableName, boolean success, long txnId)
         ret = false;
       }
     } else {
+      LOG.info("txnId: {} failed. Aborting..", txnId);
+      AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId);
+      abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode());
       try {
-        LOG.info("txnId: {} failed. Aborting..", txnId);
-        AbortTxnsRequest abortTxnsRequest = new AbortTxnsRequest(Lists.newArrayList(txnId));
-        abortTxnsRequest.setErrorCode(TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode());
-        getMsc().abortTxns(abortTxnsRequest);
+        getMsc().rollbackTxn(abortTxnRequest);
       } catch (Exception e) {
         LOG.error("Error while aborting txnId: {} for table: {}", txnId, qualifiedTableName, e);
         ret = false;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
index 6b589d1a2364..dd5cdbb0a9e8 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
@@ -24,29 +24,29 @@ public enum TxnErrorMsg {
   // Txn Errors Codes: 50000 - 59999.
   // Query runtime aborts - 50000-50999
-  NONE(50000, "None"),
-  ABORT_QUERY(50001, " Txn aborted by Abort Query Command"),
-  ABORT_CONCURRENT(50002, " Txn aborted due to concurrent committed transaction"),
-  ABORT_WRITE_CONFLICT(50003, " Txn aborted due to write conflicts"),
-  ABORT_TIMEOUT(50004, " Txn aborted due to heartbeat time-out"),
-  ABORT_ROLLBACK(50005, "Txn aborted due to rollback"),
-  ABORT_COMPACTION_TXN(50006, "Compaction txn is aborted"),
-  ABORT_MSCK_TXN(50007, "Msck txn is aborted"),
-  ABORT_MIGRATION_TXN(50008, "Managed Migration transaction is aborted"),
+  NONE(50000, "none"),
+  ABORT_QUERY(50001, "abort by Query command"),
+  ABORT_CONCURRENT(50002, "concurrent committed transaction"),
+  ABORT_WRITE_CONFLICT(50003, "write conflicts"),
+  ABORT_TIMEOUT(50004, "heartbeat time-out"),
+  ABORT_ROLLBACK(50005, "rollback"),
+  ABORT_COMPACTION_TXN(50006, "compaction transaction abort"),
+  ABORT_MSCK_TXN(50007, "msck transaction abort"),
+  ABORT_MIGRATION_TXN(50008, "managed Migration transaction abort"),
 
   // Replication related aborts - 51000 - 51099
-  ABORT_DEFAULT_REPL_TXN(51000, " Replication:" +
-      "Abort default replication transaction"),
-  ABORT_REPLAYED_REPL_TXN(51001, " Replication:" +
-      "Abort replayed replication transaction"),
-  ABORT_REPL_WRITEID_TXN(51002, " Replication:" +
-      "Abort all the allocated txns so that the mapped write ids are referred as aborted ones."),
-  ABORT_FETCH_FAILOVER_METADATA(51003, " Replication:" +
-      "Abort all transactions while trying to fetch failover metadata."),
-  ABORT_WRITE_TXN_AFTER_TIMEOUT(51004, " Replication:" +
-      "Abort only write transactions for the db under replication"),
-  ABORT_ONGOING_TXN_FOR_TARGET_DB(51005, " Replication:" +
-      "Abort the ongoing transactions(opened prior to failover) for the target database.");
+  ABORT_DEFAULT_REPL_TXN(51000, "Replication:" +
+      "default replication transaction abort"),
+  ABORT_REPLAYED_REPL_TXN(51001, "Replication:" +
+      "replayed replication transaction abort"),
+  ABORT_REPL_WRITEID_TXN(51002, "Replication:" +
+      "abort of allocated txns for referring mapped write ids as aborted ones"),
+  ABORT_FETCH_FAILOVER_METADATA(51003, "Replication:" +
+      "abort of txns while trying to fetch failover metadata"),
+  ABORT_WRITE_TXN_AFTER_TIMEOUT(51004, "Replication:" +
+      "abort of write txns for the db under replication"),
+  ABORT_ONGOING_TXN_FOR_TARGET_DB(51005, "Replication:" +
+      "abort of ongoing txns(opened prior to failover) for the target database");
 
   private final long errorCode;
   private final String txnErrorMsg;
@@ -62,9 +62,7 @@ public long getErrorCode() {
 
   @Override
   public String toString() {
-    return "TxnLog: TxnErrorMsg{" +
-        "errorCode=" + errorCode +
-        ", txnErrorMsg=" + txnErrorMsg +"}";
+    return txnErrorMsg;
   }
 
   public static TxnErrorMsg getErrorMsg(long errorCode) {
@@ -75,5 +73,9 @@ public static TxnErrorMsg getErrorMsg(long errorCode) {
     }
     return null;
   }
+
+  public String toSqlString() {
+    return "'" + this.toString() + "'";
+  }
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 0cbddf434251..e8421aa1d6f8 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1059,8 +1059,8 @@ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaExcept
         errorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN;
       } else if (isHiveReplTxn) {
         errorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN;
-      } else if (rqst.isSetErrorCode() && rqst.getErrorCode() == TxnErrorMsg.ABORT_ROLLBACK.getErrorCode()) {
-        errorMsg = TxnErrorMsg.ABORT_ROLLBACK;
+      } else if (rqst.isSetErrorCode()) {
+        errorMsg = TxnErrorMsg.getErrorMsg(rqst.getErrorCode());
       }
 
       abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, errorMsg);
@@ -1522,7 +1522,7 @@ public void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbort
             if (partitionName != null) {
               resource.append('/').append(partitionName);
             }
-            String msg = "Aborting " + " due to a write conflict on " + resource +
+            String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + commitId + "]" + " due to a write conflict on " + resource +
               " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8);
             //remove WRITE_SET info for current txn since it's about to abort
             dbConn.rollback(undoWriteSetForCurrentTxn);
@@ -5115,7 +5115,8 @@ private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbe
     if (txnids.isEmpty()) {
       return 0;
     }
-    LOG.debug("Trying to abort txns due to : {}. Aborted Transaction IDs : {}", errorMsg, txnids);
+    Collections.sort(txnids);
+    LOG.debug("Aborting {} transactions {} due to {}", txnids.size(), txnids, errorMsg);
     removeTxnsFromMinHistoryLevel(dbConn, txnids);
     try {
       stmt = dbConn.createStatement();
@@ -5127,7 +5128,9 @@ private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbe
 
       // add update txns queries to query list
       prefix.append("UPDATE \"TXNS\" SET \"TXN_STATE\" = ").append(TxnStatus.ABORTED)
-          .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN).append(" AND ");
+          .append(" , ").append("\"TXN_META_INFO\" = ").append(errorMsg.toSqlString())
+          .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN)
+          .append(" AND ");
       if (checkHeartbeat) {
         suffix.append(" AND \"TXN_LAST_HEARTBEAT\" < ")
             .append(getEpochFn(dbProduct)).append("-").append(timeout);
@@ -5166,7 +5169,7 @@ private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbe
       if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
         Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size());
       }
-      LOG.warn("Txns are aborted successfully due to : {}. Aborted Transaction IDs : {}", errorMsg, txnids);
+      LOG.warn("Aborted {} transactions {} due to {}", txnids.size(), txnids, errorMsg);
       return numAborted;
     } finally {
       closeStmt(stmt);
@@ -5793,7 +5796,6 @@ public void performTimeOuts() {
             dbConn.commit();
             numTxnsAborted += batchToAbort.size();
             //todo: add TXNS.COMMENT filed and set it to 'aborted by system due to timeout'
-            Collections.sort(batchToAbort);//easier to read logs
           } else {
             //could not abort all txns in this batch - this may happen because in parallel with this
diff --git a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
index 7181adaf958b..b3c83b95b069 100644
--- a/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
+++ b/streaming/src/java/org/apache/hive/streaming/TransactionBatch.java
@@ -24,7 +24,16 @@
 import org.apache.hadoop.hive.metastore.LockComponentBuilder;
 import org.apache.hadoop.hive.metastore.LockRequestBuilder;
 import org.apache.hadoop.hive.metastore.Warehouse;
-import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
 import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
 import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.LockException;

From d4223fa786d774e8528cc94ac305473781a797f6 Mon Sep 17 00:00:00 2001
From: Sourabh Badhya
Date: Tue, 10 Jan 2023 10:23:06 +0530
Subject: [PATCH 5/5] Address review comments - 4

---
 .../apache/hadoop/hive/metastore/Msck.java    | 1 -
 .../hive/metastore/txn/TxnErrorMsg.java       | 22 +++++++-------
 .../hadoop/hive/metastore/txn/TxnHandler.java | 29 +++++++++----------
 3 files changed, 25 insertions(+), 27 deletions(-)

diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
index e79836b86870..926875e514ca 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java
@@ -57,7 +57,6 @@
 import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
 
 /**
  * Msck repairs table metadata specifically related to partition information to be in-sync with directories in table
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
index dd5cdbb0a9e8..6d7181db667b 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnErrorMsg.java
@@ -25,14 +25,14 @@ public enum TxnErrorMsg {
   // Txn Errors Codes: 50000 - 59999.
   // Query runtime aborts - 50000-50999
   NONE(50000, "none"),
-  ABORT_QUERY(50001, "abort by Query command"),
+  ABORT_QUERY(50001, "abort by query command"),
   ABORT_CONCURRENT(50002, "concurrent committed transaction"),
   ABORT_WRITE_CONFLICT(50003, "write conflicts"),
   ABORT_TIMEOUT(50004, "heartbeat time-out"),
   ABORT_ROLLBACK(50005, "rollback"),
   ABORT_COMPACTION_TXN(50006, "compaction transaction abort"),
   ABORT_MSCK_TXN(50007, "msck transaction abort"),
-  ABORT_MIGRATION_TXN(50008, "managed Migration transaction abort"),
+  ABORT_MIGRATION_TXN(50008, "managed migration transaction abort"),
 
   // Replication related aborts - 51000 - 51099
   ABORT_DEFAULT_REPL_TXN(51000, "Replication:" +
@@ -49,11 +49,11 @@ public enum TxnErrorMsg {
       "abort of ongoing txns(opened prior to failover) for the target database");
 
   private final long errorCode;
-  private final String txnErrorMsg;
+  private final String errorMsg;
 
-  TxnErrorMsg(int errorCode, String txnErrorMsg) {
+  TxnErrorMsg(int errorCode, String errorMsg) {
     this.errorCode = errorCode;
-    this.txnErrorMsg = txnErrorMsg;
+    this.errorMsg = errorMsg;
   }
 
   public long getErrorCode() {
@@ -62,16 +62,16 @@ public long getErrorCode() {
 
   @Override
   public String toString() {
-    return txnErrorMsg;
+    return errorMsg;
   }
 
-  public static TxnErrorMsg getErrorMsg(long errorCode) {
-    for (TxnErrorMsg errorMsg : values()) {
-      if (errorMsg.getErrorCode() == errorCode) {
-        return errorMsg;
+  public static TxnErrorMsg getTxnErrorMsg(long errorCode) {
+    for (TxnErrorMsg txnErrorMsg : values()) {
+      if (txnErrorMsg.getErrorCode() == errorCode) {
+        return txnErrorMsg;
       }
     }
-    return null;
+    return TxnErrorMsg.NONE;
   }
 
   public String toSqlString() {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index e8421aa1d6f8..b42b25db95f4 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@ -1012,7 +1012,7 @@ private void deleteReplTxnMapEntry(Connection dbConn, long sourceTxnId, String r
   @RetrySemantics.Idempotent
   public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
     long txnid = rqst.getTxnid();
-    TxnErrorMsg errorMsg = TxnErrorMsg.NONE;
+    TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE;
     long sourceTxnId = -1;
     boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type());
     boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type());
@@ -1056,14 +1056,14 @@ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaExcept
       }
 
       if (isReplayedReplTxn) {
-        errorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN;
+        txnErrorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN;
       } else if (isHiveReplTxn) {
-        errorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN;
+        txnErrorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN;
       } else if (rqst.isSetErrorCode()) {
-        errorMsg = TxnErrorMsg.getErrorMsg(rqst.getErrorCode());
+        txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode());
       }
 
-      abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, errorMsg);
+      abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, txnErrorMsg);
 
       if (isReplayedReplTxn) {
         deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy());
@@ -1099,7 +1099,7 @@ public void abortTxns(AbortTxnsRequest rqst) throws MetaException {
     List<Long> txnIds = rqst.getTxn_ids();
     TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE;
     if (rqst.isSetErrorCode()) {
-      txnErrorMsg = TxnErrorMsg.getErrorMsg(rqst.getErrorCode());
+      txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode());
     }
     try {
       Connection dbConn = null;
@@ -5091,8 +5091,8 @@ private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING}
   private static Map<LockType, Map<LockType, Map<LockState, LockAction>>> jumpTable;
 
   private int abortTxns(Connection dbConn, List<Long> txnids,
-      boolean skipCount, boolean isReplReplayed, TxnErrorMsg errorMsg) throws SQLException, MetaException {
-    return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed, errorMsg);
+      boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg) throws SQLException, MetaException {
+    return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed, txnErrorMsg);
   }
 
   /**
   * TODO: expose this as an operation to client.  Useful for streaming API to abort all remaining
@@ -5109,14 +5109,14 @@ private int abortTxns(Connection dbConn, List<Long> txnids,
    * @throws SQLException
    */
   private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbeat,
-      boolean skipCount, boolean isReplReplayed, TxnErrorMsg errorMsg)
+      boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg)
       throws SQLException, MetaException {
     Statement stmt = null;
     if (txnids.isEmpty()) {
       return 0;
     }
     Collections.sort(txnids);
-    LOG.debug("Aborting {} transactions {} due to {}", txnids.size(), txnids, errorMsg);
+    LOG.debug("Aborting {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
     removeTxnsFromMinHistoryLevel(dbConn, txnids);
     try {
       stmt = dbConn.createStatement();
@@ -5128,9 +5128,8 @@ private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbe
 
       // add update txns queries to query list
       prefix.append("UPDATE \"TXNS\" SET \"TXN_STATE\" = ").append(TxnStatus.ABORTED)
-          .append(" , ").append("\"TXN_META_INFO\" = ").append(errorMsg.toSqlString())
-          .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN)
-          .append(" AND ");
+          .append(" , \"TXN_META_INFO\" = ").append(txnErrorMsg.toSqlString())
+          .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN).append(" AND ");
       if (checkHeartbeat) {
         suffix.append(" AND \"TXN_LAST_HEARTBEAT\" < ")
             .append(getEpochFn(dbProduct)).append("-").append(timeout);
@@ -5169,7 +5168,7 @@ private int abortTxns(Connection dbConn, List<Long> txnids, boolean checkHeartbe
       if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
         Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size());
       }
-      LOG.warn("Aborted {} transactions {} due to {}", txnids.size(), txnids, errorMsg);
+      LOG.warn("Aborted {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
       return numAborted;
     } finally {
       closeStmt(stmt);
@@ -5806,7 +5805,7 @@ public void performTimeOuts() {
             dbConn.rollback();
           }
         }
-        LOG.info("Aborted {} transactions due to timeout", numTxnsAborted);
+        LOG.info("Aborted {} transaction(s) due to timeout", numTxnsAborted);
         if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
          Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_TIMED_OUT_TXNS).inc(numTxnsAborted);
        }
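
For context on how the pieces above fit together, here is a minimal client-side sketch of the abort-reason flow this series introduces: the caller attaches a TxnErrorMsg error code to the abort request, and the server resolves it via TxnErrorMsg.getTxnErrorMsg(...) and records it with the aborted transaction. The IMetaStoreClient instance and the hard-coded txn ids below are illustrative placeholders only; the request types, setErrorCode calls, and error codes are the ones used in the diffs above.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest;
import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;

public class AbortWithReasonSketch {
  // Abort one transaction and record why it was aborted (mirrors the Msck change above).
  static void abortOne(IMetaStoreClient client, long txnId) throws Exception {
    AbortTxnRequest request = new AbortTxnRequest(txnId);
    request.setErrorCode(TxnErrorMsg.ABORT_MSCK_TXN.getErrorCode());
    client.rollbackTxn(request);
  }

  // Abort a batch of transactions with a single reason code (mirrors the DDL abort path).
  static void abortMany(IMetaStoreClient client) throws Exception {
    AbortTxnsRequest request = new AbortTxnsRequest(Arrays.asList(101L, 102L));
    request.setErrorCode(TxnErrorMsg.ABORT_QUERY.getErrorCode());
    client.abortTxns(request);
  }
}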
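The prefix/suffix StringBuilder plumbing in abortTxns(...) is hard to read in diff form, so the sketch below spells out the rough shape of the UPDATE it now issues, with TXN_META_INFO carrying the abort reason. The 'o'/'a' state literals and the TXN_ID IN (...) placeholder are simplifying assumptions for illustration; the real statement is assembled from the prefix and suffix buffers shown above, batches the txn ids into IN clauses, and may also carry the heartbeat predicate.

import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;

public class AbortUpdateSketch {
  // Simplified shape of the statement built in TxnHandler.abortTxns(...); not the
  // actual code path, just the resulting SQL spelled out for readability.
  static String abortSql(TxnErrorMsg txnErrorMsg) {
    return "UPDATE \"TXNS\" SET \"TXN_STATE\" = 'a'"
        + " , \"TXN_META_INFO\" = " + txnErrorMsg.toSqlString()
        + " WHERE \"TXN_STATE\" = 'o' AND \"TXN_ID\" IN (...)";
  }
  // abortSql(TxnErrorMsg.ABORT_TIMEOUT) yields roughly:
  //   UPDATE "TXNS" SET "TXN_STATE" = 'a' , "TXN_META_INFO" = 'heartbeat time-out'
  //   WHERE "TXN_STATE" = 'o' AND "TXN_ID" IN (...)
}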