[CIR] Upstream __sync_<OP>_and_fetch builtins #168347
Conversation
@llvm/pr-subscribers-clangir @llvm/pr-subscribers-clang

Author: Hendrik Hübner (HendrikHuebner)

Changes: This PR upstreams support for several __sync_<OP>_and_fetch builtins. Additionally, some needed helper methods are added.

Patch is 56.18 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/168347.diff

6 Files Affected:
diff --git a/clang/lib/CIR/CodeGen/Address.h b/clang/lib/CIR/CodeGen/Address.h
index c8ce530a7b0d3..02a24a86b3c84 100644
--- a/clang/lib/CIR/CodeGen/Address.h
+++ b/clang/lib/CIR/CodeGen/Address.h
@@ -45,8 +45,12 @@ class Address {
public:
Address(mlir::Value pointer, mlir::Type elementType,
clang::CharUnits alignment)
- : pointerAndKnownNonNull(pointer, false), elementType(elementType),
- alignment(alignment) {
+ : Address(pointer, elementType, alignment, false) {}
+
+ Address(mlir::Value pointer, mlir::Type elementType,
+ clang::CharUnits alignment, bool pointerAndKnownNonNull)
+ : pointerAndKnownNonNull(pointer, pointerAndKnownNonNull),
+ elementType(elementType), alignment(alignment) {
assert(pointer && "Pointer cannot be null");
assert(elementType && "Element type cannot be null");
assert(!alignment.isZero() && "Alignment cannot be zero");
@@ -77,6 +81,13 @@ class Address {
return Address(newPtr, getElementType(), getAlignment());
}
+ /// Return address with different alignment, but same pointer and element
+ /// type.
+ Address withAlignment(clang::CharUnits newAlignment) const {
+ return Address(getPointer(), getElementType(), newAlignment,
+ isKnownNonNull());
+ }
+
/// Return address with different element type, a bitcast pointer, and
/// the same alignment.
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const;
@@ -133,6 +144,19 @@ class Address {
template <typename OpTy> OpTy getDefiningOp() const {
return mlir::dyn_cast_or_null<OpTy>(getDefiningOp());
}
+
+ /// Whether the pointer is known not to be null.
+ bool isKnownNonNull() const {
+ assert(isValid() && "Invalid address");
+ return static_cast<bool>(pointerAndKnownNonNull.getInt());
+ }
+
+ /// Set the non-null bit.
+ Address setKnownNonNull() {
+ assert(isValid() && "Invalid address");
+ pointerAndKnownNonNull.setInt(true);
+ return *this;
+ }
};
} // namespace clang::CIRGen
diff --git a/clang/lib/CIR/CodeGen/CIREHScopeStack.h b/clang/lib/CIR/CodeGen/CIREHScopeStack.h
new file mode 100644
index 0000000000000..c7b86a06339a8
--- /dev/null
+++ b/clang/lib/CIR/CodeGen/CIREHScopeStack.h
@@ -0,0 +1,279 @@
+//===-- EHScopeStack.h - Stack for cleanup CIR generation -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes should be the minimum interface required for other parts of
+// CIR CodeGen to emit cleanups. The implementation is in CIRGenCleanup.cpp and
+// other implemenentation details that are not widely needed are in
+// CIRGenCleanup.h.
+//
+// TODO(cir): this header should be shared between LLVM and CIR codegen.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+#define CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
+
+#include "clang/CIR/Dialect/IR/CIRDialect.h"
+#include "clang/lib/CodeGen/EHScopeStack.h"
+
+namespace clang::CIRGen {
+
+class CIRGenFunction;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+ /// If null, LatestBranch points directly to the destination.
+ mlir::Block *optimisticBranchBlock = nullptr;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ mlir::Block *destination = nullptr;
+
+ /// The destination index value.
+ unsigned destinationIndex = 0;
+
+ /// The initial branch of the fixup.
+ cir::BrOp initialBranch = {};
+};
+
+enum CleanupKind : unsigned {
+ /// Denotes a cleanup that should run when a scope is exited using exceptional
+ /// control flow (a throw statement leading to stack unwinding, ).
+ EHCleanup = 0x1,
+
+ /// Denotes a cleanup that should run when a scope is exited using normal
+ /// control flow (falling off the end of the scope, return, goto, ...).
+ NormalCleanup = 0x2,
+
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ LifetimeMarker = 0x8,
+ NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+ friend class CIRGenFunction;
+
+public:
+ // TODO(ogcg): Switch to alignof(uint64_t) instead of 8
+ enum { ScopeStackAlignment = 8 };
+
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from startOfData to endOfBuffer.
+ ptrdiff_t size = -1;
+
+ explicit stable_iterator(ptrdiff_t size) : size(size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() = default;
+
+ bool isValid() const { return size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator other) const { return size <= other.size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+ /// Returns false is I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return size < I.size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.size == B.size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.size != B.size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class LLVM_MOVABLE_POLYMORPHIC_TYPE Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+
+ public:
+ Cleanup(const Cleanup &) = default;
+ Cleanup(Cleanup &&) {}
+ Cleanup() = default;
+
+ virtual ~Cleanup() = default;
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+ // \param flags cleanup kind.
+ virtual void emit(CIRGenFunction &cgf) = 0;
+ };
+
+private:
+ // The implementation for this class is in CIRGenCleanup.h and
+ // CIRGenCleanup.cpp; the definition is here because it's used as a
+ // member of CIRGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ std::unique_ptr<char[]> startOfBuffer;
+
+ /// The end of the buffer.
+ char *endOfBuffer = nullptr;
+
+ /// The first valid entry in the buffer.
+ char *startOfData = nullptr;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator innermostNormalCleanup = stable_end();
+
+ /// The innermost EH scope on the stack.
+ stable_iterator innermostEHScope = stable_end();
+
+ /// The CGF this Stack belong to
+ CIRGenFunction *cgf = nullptr;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup> branchFixups;
+
+ // This class uses a custom allocator for maximum efficiency because cleanups
+ // are allocated and freed very frequently. It's basically a bump pointer
+ // allocator, but we can't use LLVM's BumpPtrAllocator because we use offsets
+ // into the buffer as stable iterators.
+ char *allocate(size_t size);
+ void deallocate(size_t size);
+
+ void *pushCleanup(CleanupKind kind, size_t dataSize);
+
+public:
+ EHScopeStack() = default;
+ ~EHScopeStack() = default;
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class... As> void pushCleanup(CleanupKind kind, As... a) {
+ static_assert(alignof(T) <= ScopeStackAlignment,
+ "Cleanup's alignment is too large.");
+ void *buffer = pushCleanup(kind, sizeof(T));
+ [[maybe_unused]] Cleanup *obj = new (buffer) T(a...);
+ }
+
+ void setCGF(CIRGenFunction *inCGF) { cgf = inCGF; }
+
+ /// Pops a cleanup scope off the stack. This is private to CIRGenCleanup.cpp.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned numHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CIRGenException.cpp.
+ void popCatch();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return startOfData == endOfBuffer; }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return innermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return innermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
+ stable_iterator getInnermostEHScope() const { return innermostEHScope; }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(endOfBuffer - startOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() { return stable_iterator(0); }
+
+ /// Turn a stable reference to a scope depth into a unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator savePoint) const;
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ branchFixups.push_back(BranchFixup());
+ return branchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return branchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned i) {
+ assert(i < getNumBranchFixups());
+ return branchFixups[i];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+};
+
+} // namespace clang::CIRGen
+
+#endif // CLANG_LIB_CIR_CODEGEN_EHSCOPESTACK_H
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 77f19343653db..a0a350ebe031c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CIR/Dialect/IR/CIRTypes.h"
#include "clang/CIR/MissingFeatures.h"
#include "llvm/Support/ErrorHandling.h"
@@ -58,6 +59,107 @@ static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
return RValue::get(result);
}
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
+ cir::IntType intType) {
+ v = cgf.emitToMemory(v, t);
+
+ if (isa<cir::PointerType>(v.getType()))
+ return cgf.getBuilder().createPtrToInt(v, intType);
+
+ assert(v.getType() == intType);
+ return v;
+}
+
+static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
+ mlir::Type resultType) {
+ v = cgf.emitFromMemory(v, t);
+
+ if (isa<cir::PointerType>(resultType))
+ return cgf.getBuilder().createIntToPtr(v, resultType);
+
+ assert(v.getType() == resultType);
+ return v;
+}
+
+static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e) {
+ ASTContext &astContext = cgf.getContext();
+ Address ptr = cgf.emitPointerWithAlignment(e->getArg(0));
+ unsigned bytes =
+ isa<cir::PointerType>(ptr.getElementType())
+ ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
+ : cgf.cgm.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8;
+ unsigned align = ptr.getAlignment().getQuantity();
+ if (align % bytes != 0) {
+ DiagnosticsEngine &diags = cgf.cgm.getDiags();
+ diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
+ // Force address to be at least naturally-aligned.
+ return ptr.withAlignment(CharUnits::fromQuantity(bytes));
+ }
+ return ptr;
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.
+static mlir::Value makeBinaryAtomicValue(
+ CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
+ mlir::Value *neededValP = nullptr,
+ cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
+
+ QualType type = expr->getType();
+ QualType ptrType = expr->getArg(0)->getType();
+
+ assert(ptrType->isPointerType());
+ assert(
+ cgf.getContext().hasSameUnqualifiedType(type, ptrType->getPointeeType()));
+ assert(cgf.getContext().hasSameUnqualifiedType(type,
+ expr->getArg(1)->getType()));
+
+ Address destAddr = checkAtomicAlignment(cgf, expr);
+ CIRGenBuilderTy &builder = cgf.getBuilder();
+ cir::IntType intType =
+ ptrType->getPointeeType()->isUnsignedIntegerType()
+ ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
+ : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
+ mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
+ mlir::Type valueType = val.getType();
+ val = emitToInt(cgf, val, type, intType);
+
+ // This output argument is needed for post atomic fetch operations
+ // that calculate the result of the operation as return value of
+ // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
+ // memory location and returns the old value.
+ if (neededValP) {
+ *neededValP = val;
+ }
+
+ auto rmwi = cir::AtomicFetchOp::create(
+ builder, cgf.getLoc(expr->getSourceRange()), destAddr.emitRawPointer(),
+ val, kind, ordering, false, /* is volatile */
+ true); /* fetch first */
+ return emitFromInt(cgf, rmwi->getResult(0), type, valueType);
+}
+
+static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
+ cir::AtomicFetchKind atomicOpkind,
+ const CallExpr *e, cir::BinOpKind binopKind,
+ bool invert = false) {
+ mlir::Value val;
+ clang::QualType typ = e->getType();
+ mlir::Value result = makeBinaryAtomicValue(cgf, atomicOpkind, e, &val);
+ clang::CIRGen::CIRGenBuilderTy &builder = cgf.getBuilder();
+ result = cir::BinOp::create(builder, result.getLoc(), binopKind, result, val);
+
+ if (invert) {
+ result = cir::UnaryOp::create(builder, result.getLoc(),
+ cir::UnaryOpKind::Not, result);
+ }
+
+ result = emitFromInt(cgf, result, typ, val.getType());
+ return RValue::get(result);
+}
+
RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
mlir::Value input = emitScalarExpr(e->getArg(0));
mlir::Value amount = emitScalarExpr(e->getArg(1));
@@ -520,6 +622,172 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
return RValue::get(nullptr);
}
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
+ llvm_unreachable("Shouldn't make it through sema");
+
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ llvm_unreachable("BI__sync_fetch_and_add NYI");
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ llvm_unreachable("BI__sync_fetch_and_sub NYI");
+
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ llvm_unreachable("BI__sync_fetch_and_or NYI");
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ llvm_unreachable("BI__sync_fetch_and_and NYI");
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ llvm_unreachable("BI__sync_fetch_and_xor NYI");
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ llvm_unreachable("BI__sync_fetch_and_nand NYI");
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ llvm_unreachable("BI__sync_fetch_and_min NYI");
+ case Builtin::BI__sync_fetch_and_max:
+ llvm_unreachable("BI__sync_fetch_and_max NYI");
+ case Builtin::BI__sync_fetch_and_umin:
+ llvm_unreachable("BI__sync_fetch_and_umin NYI");
+ case Builtin::BI__sync_fetch_and_umax:
+ llvm_unreachable("BI__sync_fetch_and_umax NYI");
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sy...
[truncated]
The PR branch was force-pushed from ca2031e to 661817d.
Review comment on CIRGenBuiltin.cpp, emitToInt, at "if (isa<cir::PointerType>(v.getType()))":

Suggested change:
    if (isa<cir::PointerType>(v.getType()))
to
    if (mlir::isa<cir::PointerType>(v.getType()))

Making this explicit is a nice hint to readers of the code who aren't familiar with MLIR, letting them know that this can be subtly different (specifically, llvm::isa<> only works with pointers, whereas this is acting on an object that wraps a pointer).
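A minimal illustrative sketch of the distinction (the helper function and the clang AST example are invented for illustration, not part of the patch):

    #include "mlir/IR/Types.h"
    #include "clang/CIR/Dialect/IR/CIRTypes.h"

    // mlir::Type is a value-semantics handle that wraps a pointer to the
    // uniqued type storage, so the mlir:: cast helpers are the ones that
    // know how to look through the wrapper.
    static bool isCIRPointer(mlir::Type ty) {
      return mlir::isa<cir::PointerType>(ty);
    }

    // llvm::isa<>, by contrast, is typically applied to raw pointers or
    // references, e.g. clang AST nodes:
    //   if (llvm::isa<clang::PointerType>(qualTy.getTypePtr())) ...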
Review comment on emitFromInt, at "if (isa<cir::PointerType>(resultType))":

Suggested change:
    if (isa<cir::PointerType>(resultType))
to
    if (mlir::isa<cir::PointerType>(resultType))
Review comment on checkAtomicAlignment, at the computation of "unsigned bytes":

It's not clear to me why we'd get the type size in bits and then divide, as opposed to just calling getTypeSize. Classic codegen uses DL.getTypeStoreSize, but the MLIR DataLayout interface doesn't seem to have an equivalent function.
Review comment on emitBinaryAtomicPost, at "if (invert) {":

No brace here.
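A sketch of the requested style (LLVM style omits braces around a single-statement body); the lines are taken from the patch:

    if (invert)
      result = cir::UnaryOp::create(builder, result.getLoc(),
                                    cir::UnaryOpKind::Not, result);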
Review comment on the BI__sync_fetch_and_add cases in emitBuiltinExpr:

Suggested change:
    llvm_unreachable("BI__sync_fetch_and_add NYI");
to
    cgm.errorNYI(e->getSourceRange(), "BI__sync_fetch_and_add");

Similarly with the other NYIs in this PR.
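A rough sketch of how the suggestion might look in the builtin switch; the fallback return value is an assumption (mirroring the RValue::get(nullptr) used by the prefetch case earlier in the patch), not something the reviewer specified:

    case Builtin::BI__sync_fetch_and_add_1:
    case Builtin::BI__sync_fetch_and_add_2:
    case Builtin::BI__sync_fetch_and_add_4:
    case Builtin::BI__sync_fetch_and_add_8:
    case Builtin::BI__sync_fetch_and_add_16:
      // Emit a "not yet implemented" diagnostic instead of aborting the compiler.
      cgm.errorNYI(e->getSourceRange(), "BI__sync_fetch_and_add");
      return RValue::get(nullptr);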
Review comment on the TODO "this can also be abstracted into common AST helpers":

This has been done. There is now a hasBooleanRepresentation method on the Clang Type class.
Review comment on CIRGenFunction::emitFromMemory:

Suggested change:
    if (!ty->isBooleanType() && hasBooleanRepresentation(ty)) {
to
    if (!ty->isBooleanType() && ty->hasBooleanRepresentation()) {
Review comment on the test checks for "sc = __sync_add_and_fetch(&sc, uc);":

    // LLVM: [[RES0:%.*]] = atomicrmw add ptr %{{.*}}, i8 [[VAL0]] seq_cst, align 1
    // LLVM: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]]
    // LLVM: store i8 [[RET0]], ptr %{{.*}}, align 1
    sc = __sync_add_and_fetch(&sc, uc);

Please add OGCG checks.
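A hedged illustration of what the requested OGCG (classic CodeGen) check lines might look like next to the existing LLVM ones; the value names are placeholders, not verified FileCheck output:

    // OGCG: [[RES0:%.*]] = atomicrmw add ptr %{{.*}}, i8 [[VAL0:%.*]] seq_cst, align 1
    // OGCG: [[RET0:%.*]] = add i8 [[RES0]], [[VAL0]]
    // OGCG: store i8 [[RET0]], ptr %{{.*}}, align 1
    sc = __sync_add_and_fetch(&sc, uc);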
bcardosolopes left a comment:

Seems like you have been improving atomics support upstream; thanks for working on that. Given that one of the other PRs is based on an older version of the incubator, please make sure this one is also up to date. I'd rather see this PR split in two: (1) just add the NYI skeletons plus the new Address methods, and (2) the actual new functionality.
This PR upstreams support for several __sync_<OP>_and_fetch builtins. Additionally, some needed helper methods are added.
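For context, a brief usage sketch of the builtin family this PR targets (standard GCC __sync semantics; variable names are illustrative):

    int counter = 0;
    // __sync_add_and_fetch applies the operation and returns the NEW value.
    int after = __sync_add_and_fetch(&counter, 5);   // counter == 5, after == 5
    // __sync_fetch_and_add returns the OLD value instead.
    int before = __sync_fetch_and_add(&counter, 5);  // counter == 10, before == 5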