@@ -399,20 +399,14 @@ static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                          Address ptr, Address val1, Address val2,
                          Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
-                         cir::MemOrder order) {
-  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
-  if (scopeModel) {
-    assert(!cir::MissingFeatures::atomicScope());
-    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
-    return;
-  }
-
+                         cir::MemOrder order, cir::SyncScopeKind scope) {
   assert(!cir::MissingFeatures::atomicSyncScopeID());
   llvm::StringRef opName;
 
   CIRGenBuilderTy &builder = cgf.getBuilder();
   mlir::Location loc = cgf.getLoc(expr->getSourceRange());
   auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
+  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
   cir::AtomicFetchKindAttr fetchAttr;
   bool fetchFirst = true;
 
@@ -446,13 +440,14 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
-  case AtomicExpr::AO__atomic_load: {
+  case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load: {
     cir::LoadOp load =
         builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
 
-    assert(!cir::MissingFeatures::atomicSyncScopeID());
-
     load->setAttr("mem_order", orderAttr);
+    load->setAttr("sync_scope", scopeAttr);
 
     builder.createStore(loc, load->getResult(0), dest);
     return;
@@ -586,8 +581,6 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
 
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
-  case AtomicExpr::AO__scoped_atomic_load_n:
-  case AtomicExpr::AO__scoped_atomic_load:
 
   case AtomicExpr::AO__opencl_atomic_store:
   case AtomicExpr::AO__hip_atomic_store:
@@ -671,6 +664,51 @@ static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
   builder.createStore(loc, result, dest);
 }
 
+// Map clang sync scope to CIR sync scope.
+static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
+                                                SourceRange range,
+                                                clang::SyncScope scope) {
+  switch (scope) {
+  default: {
+    assert(!cir::MissingFeatures::atomicSyncScopeID());
+    cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
+    return cir::SyncScopeKind::System;
+  }
+
+  case clang::SyncScope::SingleScope:
+    return cir::SyncScopeKind::SingleThread;
+  case clang::SyncScope::SystemScope:
+    return cir::SyncScopeKind::System;
+  }
+}
+
+static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
+                         Address ptr, Address val1, Address val2,
+                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
+                         cir::MemOrder order,
+                         const std::optional<Expr::EvalResult> &scopeConst,
+                         mlir::Value scopeValue) {
+  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
+
+  if (!scopeModel) {
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, cir::SyncScopeKind::System);
+    return;
+  }
+
+  if (scopeConst.has_value()) {
+    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
+        cgf, expr->getScope()->getSourceRange(),
+        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
+    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
+                 size, order, mappedScope);
+    return;
+  }
+
+  assert(!cir::MissingFeatures::atomicSyncScopeID());
+  cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
+}
+
 static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
   if (!cir::isValidCIRAtomicOrderingCABI(order))
     return false;
@@ -688,7 +726,8 @@ static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
 static void emitAtomicExprWithDynamicMemOrder(
     CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
     Address ptr, Address val1, Address val2, Expr *isWeakExpr,
-    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad) {
+    Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad,
+    const std::optional<Expr::EvalResult> &scopeConst, mlir::Value scopeValue) {
   // The memory order is not known at compile-time. The atomic operations
   // can't handle runtime memory orders; the memory order must be hard coded.
   // Generate a "switch" statement that converts a runtime value into a
@@ -706,7 +745,7 @@ static void emitAtomicExprWithDynamicMemOrder(
     else
       emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
     emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                 size, actualOrder);
+                 size, actualOrder, scopeConst, scopeValue);
     builder.createBreak(loc);
     builder.setInsertionPointToEnd(switchBlock);
   };
@@ -773,10 +812,19 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
   TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
   uint64_t size = typeInfo.Width.getQuantity();
 
-  Expr::EvalResult orderConst;
-  mlir::Value order;
-  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
-    order = emitScalarExpr(e->getOrder());
+  // Emit the memory order operand, and try to evaluate it as a constant.
+  mlir::Value order = emitScalarExpr(e->getOrder());
+  std::optional<Expr::EvalResult> orderConst;
+  if (Expr::EvalResult eval; e->getOrder()->EvaluateAsInt(eval, getContext()))
+    orderConst.emplace(std::move(eval));
+
+  // Emit the sync scope operand, and try to evaluate it as a constant.
+  mlir::Value scope =
+      e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
+  std::optional<Expr::EvalResult> scopeConst;
+  if (Expr::EvalResult eval;
+      e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
+    scopeConst.emplace(std::move(eval));
 
   bool shouldCastToIntPtrTy = true;
 
@@ -789,12 +837,14 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
     llvm_unreachable("already handled above with emitAtomicInit");
 
   case AtomicExpr::AO__atomic_load_n:
+  case AtomicExpr::AO__scoped_atomic_load_n:
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_test_and_set:
   case AtomicExpr::AO__atomic_clear:
     break;
 
   case AtomicExpr::AO__atomic_load:
+  case AtomicExpr::AO__scoped_atomic_load:
     dest = emitPointerWithAlignment(e->getVal1());
     break;
 
@@ -927,18 +977,18 @@ RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
                  e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                  e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
 
-  if (!order) {
+  if (orderConst.has_value()) {
     // We have evaluated the memory order as an integer constant in orderConst.
     // We should not ever get to a case where the ordering isn't a valid CABI
     // value, but it's hard to enforce that in general.
-    uint64_t ord = orderConst.Val.getInt().getZExtValue();
+    uint64_t ord = orderConst->Val.getInt().getZExtValue();
     if (isMemOrderValid(ord, isStore, isLoad))
       emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
-                   size, static_cast<cir::MemOrder>(ord));
+                   size, static_cast<cir::MemOrder>(ord), scopeConst, scope);
   } else {
     emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
                                       isWeakExpr, orderFailExpr, size, isStore,
-                                      isLoad);
+                                      isLoad, scopeConst, scope);
   }
 
   if (resultTy->isVoidType())
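
For context, a minimal source-level sketch of the kind of input the new constant-scope path handles, assuming Clang's scoped atomic builtins (__scoped_atomic_load_n) and the __MEMORY_SCOPE_* macros; the function names below are illustrative only and not part of this patch:

```c++
// Both the memory order and the sync scope are integer constants here, so
// emitAtomicExpr can evaluate them up front and convertSyncScopeToCIR maps
// the scope to a cir::SyncScopeKind statically (System / SingleThread).
int load_system(int *p) {
  return __scoped_atomic_load_n(p, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SYSTEM);
}

int load_single(int *p) {
  return __scoped_atomic_load_n(p, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SINGLE);
}
```

A scope argument that is not a compile-time constant still falls through to the errorNYI("emitAtomicOp: dynamic sync scope") path added above.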