4 changes: 2 additions & 2 deletions clang/include/clang/CIR/Dialect/Builder/CIRBaseBuilder.h
@@ -175,8 +175,8 @@ class CIRBaseBuilderTy : public mlir::OpBuilder {
return cir::LoadOp::create(*this, loc, ptr, /*isDeref=*/false, isVolatile,
isNontemporal,
/*alignment=*/alignmentAttr,
/*mem_order=*/
cir::MemOrderAttr{},
/*sync_scope=*/cir::SyncScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr{},
/*tbaa=*/cir::TBAAAttr{});
}

34 changes: 21 additions & 13 deletions clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -533,6 +533,15 @@ def CIR_MemOrder : CIR_I32EnumAttr<
I32EnumAttrCase<"SequentiallyConsistent", 5, "seq_cst">
]>;

//===----------------------------------------------------------------------===//
// C/C++ sync scope definitions
//===----------------------------------------------------------------------===//

def CIR_SyncScopeKind : CIR_I32EnumAttr<"SyncScopeKind", "memory scope kind", [
Review comment (Member): memory scope kind -> sync scope kind

I32EnumAttrCase<"SingleThread", 0, "single_thread">,
I32EnumAttrCase<"System", 1, "system">
]>;

//===----------------------------------------------------------------------===//
// AllocaOp
//===----------------------------------------------------------------------===//
@@ -670,7 +679,8 @@ def CIR_LoadOp : CIR_Op<"load", [
%4 = cir.load volatile %0 : !cir.ptr<i32>, i32

// Others
%x = cir.load align(16) atomic(seq_cst) %0 : !cir.ptr<i32>, i32
%x = cir.load align(16) syncscope(single_thread) atomic(seq_cst)
%0 : !cir.ptr<i32>, i32
```
}];

@@ -679,6 +689,7 @@ def CIR_LoadOp : CIR_Op<"load", [
UnitAttr:$is_volatile,
UnitAttr:$is_nontemporal,
OptionalAttr<I64Attr>:$alignment,
OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
OptionalAttr<CIR_MemOrder>:$mem_order,
OptionalAttr<CIR_AnyTBAAAttr>:$tbaa
);
@@ -689,6 +700,7 @@ def CIR_LoadOp : CIR_Op<"load", [
(`volatile` $is_volatile^)?
(`nontemporal` $is_nontemporal^)?
(`align` `(` $alignment^ `)`)?
(`syncscope` `(` $sync_scope^ `)`)?
(`atomic` `(` $mem_order^ `)`)?
$addr `:` qualified(type($addr)) `,` type($result) attr-dict
(`tbaa` `(` $tbaa^ `)`)?
@@ -698,7 +710,8 @@ def CIR_LoadOp : CIR_Op<"load", [
// TODO(CIR): The final interface here should include an argument for the
// SyncScope::ID.
// This should be used over the ODS generated setMemOrder.
void setAtomic(cir::MemOrder order);
void setAtomic(cir::MemOrder order,
cir::SyncScopeKind scope);
}];

// FIXME: add verifier.
@@ -6094,11 +6107,6 @@ def CIR_AtomicXchg : CIR_Op<"atomic.xchg", [
let hasVerifier = 1;
}

def CIR_MemScopeKind : CIR_I32EnumAttr<"MemScopeKind", "memory scope kind", [
I32EnumAttrCase<"SingleThread", 0, "single_thread">,
I32EnumAttrCase<"System", 1, "system">
]>;

def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [
AllTypesMatch<["old", "expected", "desired"]>
]> {
@@ -6122,7 +6130,7 @@ def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [
CIR_AnyType:$desired,
Arg<CIR_MemOrder, "success memory order">:$succ_order,
Arg<CIR_MemOrder, "failure memory order">:$fail_order,
OptionalAttr<CIR_MemScopeKind>:$syncscope,
OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
OptionalAttr<I64Attr>:$alignment,
UnitAttr:$weak,
UnitAttr:$is_volatile);
@@ -6135,7 +6143,7 @@ def CIR_AtomicCmpXchg : CIR_Op<"atomic.cmp_xchg", [
`success` `=` $succ_order `,`
`failure` `=` $fail_order
`)`
(`syncscope` `(` $syncscope^ `)`)?
(`syncscope` `(` $sync_scope^ `)`)?
(`align` `(` $alignment^ `)`)?
(`weak` $weak^)?
(`volatile` $is_volatile^)?
@@ -6165,7 +6173,7 @@ def CIR_AtomicTestAndSetOp : CIR_Op<"atomic.test_and_set"> {
let arguments = (ins
Arg<CIR_PtrToType<CIR_SInt8>, "", [MemRead, MemWrite]>:$ptr,
Arg<CIR_MemOrder, "memory order">:$mem_order,
OptionalAttr<CIR_MemScopeKind>:$syncscope,
OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
OptionalAttr<I64Attr>:$alignment,
UnitAttr:$is_volatile);

@@ -6196,7 +6204,7 @@ def CIR_AtomicClearOp : CIR_Op<"atomic.clear"> {
let arguments = (ins
Arg<CIR_PtrToType<CIR_SInt8>, "", [MemRead, MemWrite]>:$ptr,
Arg<CIR_MemOrder, "memory order">:$mem_order,
OptionalAttr<CIR_MemScopeKind>:$syncscope,
OptionalAttr<CIR_SyncScopeKind>:$sync_scope,
OptionalAttr<I64Attr>:$alignment,
UnitAttr:$is_volatile);

@@ -6230,11 +6238,11 @@ def CIR_AtomicFence : CIR_Op<"atomic.fence"> {

let arguments = (ins
Arg<CIR_MemOrder, "memory order">:$ordering,
OptionalAttr<CIR_MemScopeKind>:$syncscope
OptionalAttr<CIR_SyncScopeKind>:$sync_scope
);

let assemblyFormat = [{
(`syncscope` `(` $syncscope^ `)`)? $ordering attr-dict
(`syncscope` `(` $sync_scope^ `)`)? $ordering attr-dict
}];
}

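Taken together, the new `sync_scope` operand and the widened `setAtomic` interface let a caller attach both pieces when promoting a plain load to an atomic one. A minimal usage sketch (written for illustration, not code from this patch; it assumes the CIRDialect.h header and the enum cases defined above):

```cpp
// Illustration only: turn an existing cir::LoadOp into an atomic acquire load
// that only needs to synchronize within the current thread.
#include "clang/CIR/Dialect/IR/CIRDialect.h"

static void makeThreadScopedAcquire(cir::LoadOp load) {
  // setAtomic now takes the sync scope alongside the memory order.
  load.setAtomic(cir::MemOrder::Acquire, cir::SyncScopeKind::SingleThread);
}
```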
40 changes: 28 additions & 12 deletions clang/lib/CIR/CodeGen/CIRGenAtomic.cpp
@@ -17,6 +17,7 @@
#include "CIRGenOpenMPRuntime.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SyncScope.h"
#include "clang/CIR/Dialect/IR/CIRAttrs.h"
#include "clang/CIR/Dialect/IR/CIRDataLayout.h"
#include "clang/CIR/Dialect/IR/CIRDialect.h"
@@ -350,6 +351,20 @@ static cir::IntAttr extractIntAttr(mlir::Value v) {
return {};
}

// Maps SyncScope::SingleScope to SyncScopeKind::SingleThread,
// SyncScope::SystemScope to SyncScopeKind::System,
// and asserts (llvm_unreachable) for anything else.
static cir::SyncScopeKind convertSyncScopeToCIR(clang::SyncScope scope) {
switch (scope) {
case clang::SyncScope::SingleScope:
return cir::SyncScopeKind::SingleThread;
case clang::SyncScope::SystemScope:
return cir::SyncScopeKind::System;
default:
llvm_unreachable("NYI");
}
}

// Inspect a value that is the strong/weak flag for a compare-exchange. If it
// is a constant of intergral or boolean type, set `val` to the constant's
// boolean value and return true. Otherwise leave `val` unchanged and return
@@ -418,7 +433,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
Address Val2, uint64_t Size,
cir::MemOrder SuccessOrder,
cir::MemOrder FailureOrder,
cir::MemScopeKind Scope) {
cir::SyncScopeKind Scope) {
auto &builder = CGF.getBuilder();
auto loc = CGF.getLoc(E->getSourceRange());
auto Expected = builder.createLoad(loc, Val1);
@@ -428,7 +443,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
builder, loc, Expected.getType(), boolTy, Ptr.getPointer(), Expected,
Desired, cir::MemOrderAttr::get(&CGF.getMLIRContext(), SuccessOrder),
cir::MemOrderAttr::get(&CGF.getMLIRContext(), FailureOrder),
cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
cir::SyncScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
builder.getI64IntegerAttr(Ptr.getAlignment().getAsAlign().value()));
cmpxchg.setIsVolatile(E->isVolatile());
cmpxchg.setWeak(IsWeak);
@@ -456,7 +471,7 @@ static void emitAtomicCmpXchg(CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak,
static void emitAtomicCmpXchgFailureSet(
CIRGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
Address Val1, Address Val2, mlir::Value FailureOrderVal, uint64_t Size,
cir::MemOrder SuccessOrder, cir::MemScopeKind Scope) {
cir::MemOrder SuccessOrder, cir::SyncScopeKind Scope) {

cir::MemOrder FailureOrder;
if (auto ordAttr = extractIntAttr(FailureOrderVal)) {
@@ -546,7 +561,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
Address Ptr, Address Val1, Address Val2,
mlir::Value IsWeak, mlir::Value FailureOrder,
uint64_t Size, cir::MemOrder Order,
cir::MemScopeKind Scope) {
cir::SyncScopeKind Scope) {
assert(!cir::MissingFeatures::syncScopeID());
StringRef Op;

@@ -594,9 +609,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__scoped_atomic_load_n:
case AtomicExpr::AO__scoped_atomic_load: {
auto load = builder.createLoad(loc, Ptr);
// FIXME(cir): add scope information.
assert(!cir::MissingFeatures::syncScopeID());
load.setMemOrder(Order);
load.setAtomic(Order, Scope);
load.setIsVolatile(E->isVolatile());

// TODO(cir): this logic should be part of createStore, but doing so
@@ -748,7 +761,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_test_and_set: {
auto op = cir::AtomicTestAndSetOp::create(
builder, loc, Ptr.getPointer(), Order,
cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
cir::SyncScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
builder.getI64IntegerAttr(Ptr.getAlignment().getQuantity()),
E->isVolatile());
builder.createStore(loc, op, Dest);
@@ -758,7 +771,7 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__atomic_clear: {
cir::AtomicClearOp::create(
builder, loc, Ptr.getPointer(), Order,
cir::MemScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
cir::SyncScopeKindAttr::get(&CGF.getMLIRContext(), Scope),
builder.getI64IntegerAttr(Ptr.getAlignment().getQuantity()),
E->isVolatile());
return;
@@ -813,14 +826,17 @@ static void emitAtomicOp(CIRGenFunction &CGF, AtomicExpr *Expr, Address Dest,
if (!ScopeModel) {
assert(!cir::MissingFeatures::syncScopeID());
emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, cir::MemScopeKind::System);
Order, cir::SyncScopeKind::System);
return;
}

// Handle constant scope.
if (extractIntAttr(Scope)) {
if (auto scopeAttr = extractIntAttr(Scope)) {
assert(!cir::MissingFeatures::syncScopeID());
llvm_unreachable("NYI");
auto mappedScope =
convertSyncScopeToCIR(ScopeModel->map(scopeAttr.getUInt()));
emitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
Order, mappedScope);
return;
}

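The constant-scope branch above now routes the source-level scope through `ScopeModel->map()` and `convertSyncScopeToCIR()` instead of falling into the old `llvm_unreachable`. As an illustration of an input that should take this path (my own example, not taken from the patch's tests):

```cpp
// Illustration only: a scoped atomic builtin whose scope argument is a constant.
// __MEMORY_SCOPE_SINGLE is expected to map to SyncScope::SingleScope and then,
// via convertSyncScopeToCIR, to cir::SyncScopeKind::SingleThread, so the emitted
// cir.load should carry syncscope(single_thread) together with atomic(acquire).
int load_single_scope(int *p) {
  return __scoped_atomic_load_n(p, __ATOMIC_ACQUIRE, __MEMORY_SCOPE_SINGLE);
}
```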
1 change: 1 addition & 0 deletions clang/lib/CIR/CodeGen/CIRGenBuilder.h
@@ -870,6 +870,7 @@ class CIRGenBuilderTy : public cir::CIRBaseBuilderTy {
return cir::LoadOp::create(
*this, loc, addr.getElementType(), addr.getPointer(), /*isDeref=*/false,
/*is_volatile=*/isVolatile, /*is_nontemporal=*/isNontemporal, align,
/*sync_scope=*/cir::SyncScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr{}, /*tbaa=*/cir::TBAAAttr{});
}

10 changes: 5 additions & 5 deletions clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -370,15 +370,15 @@ static mlir::Value makeAtomicCmpXchgValue(CIRGenFunction &cgf,
cir::MemOrder::SequentiallyConsistent),
MemOrderAttr::get(&cgf.getMLIRContext(),
cir::MemOrder::SequentiallyConsistent),
MemScopeKindAttr::get(&cgf.getMLIRContext(), cir::MemScopeKind::System),
SyncScopeKindAttr::get(&cgf.getMLIRContext(), cir::SyncScopeKind::System),
builder.getI64IntegerAttr(destAddr.getAlignment().getAsAlign().value()));

return returnBool ? op.getResult(1) : op.getResult(0);
}

static mlir::Value makeAtomicFenceValue(CIRGenFunction &cgf,
const CallExpr *expr,
cir::MemScopeKind syncScope) {
cir::SyncScopeKind syncScope) {
auto &builder = cgf.getBuilder();
mlir::Value orderingVal = cgf.emitScalarExpr(expr->getArg(0));

@@ -392,7 +392,7 @@ static mlir::Value makeAtomicFenceValue(CIRGenFunction &cgf,

cir::AtomicFence::create(
builder, cgf.getLoc(expr->getSourceRange()), ordering,
MemScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
}

return mlir::Value();
@@ -2155,10 +2155,10 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,

case Builtin::BI__atomic_thread_fence:
return RValue::get(
makeAtomicFenceValue(*this, E, cir::MemScopeKind::System));
makeAtomicFenceValue(*this, E, cir::SyncScopeKind::System));
case Builtin::BI__atomic_signal_fence:
return RValue::get(
makeAtomicFenceValue(*this, E, cir::MemScopeKind::SingleThread));
makeAtomicFenceValue(*this, E, cir::SyncScopeKind::SingleThread));
case Builtin::BI__c11_atomic_thread_fence:
case Builtin::BI__c11_atomic_signal_fence:
llvm_unreachable("BI__c11_atomic_thread_fence like NYI");
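The two fence builtins dispatched above differ only in the scope they pass along: `__atomic_thread_fence` orders against all threads (system scope), while `__atomic_signal_fence` only has to order against the current thread. A hedged source-level illustration (mine, not from the patch):

```cpp
// Illustration only: inputs that exercise the two fence paths handled above.
void fences(void) {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);  // expected: cir.atomic.fence at system scope
  __atomic_signal_fence(__ATOMIC_SEQ_CST);  // expected: syncscope(single_thread) fence
}
```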
3 changes: 2 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -18,6 +18,7 @@
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "TargetInfo.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"
#include "clang/CIR/MissingFeatures.h"

// TODO(cir): once all builtins are covered, decide whether we still
@@ -4489,7 +4490,7 @@ CIRGenFunction::emitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E,
case NEON::BI__builtin_neon_vldap1q_lane_s64: {
cir::LoadOp Load = builder.createAlignedLoad(
Ops[0].getLoc(), vTy.getElementType(), Ops[0], PtrOp0.getAlignment());
Load.setAtomic(cir::MemOrder::Acquire);
Load.setAtomic(cir::MemOrder::Acquire, cir::SyncScopeKind::System);
return cir::VecInsertOp::create(builder, getLoc(E->getExprLoc()),
builder.createBitcast(Ops[1], vTy), Load,
Ops[2]);
7 changes: 2 additions & 5 deletions clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -1013,12 +1013,9 @@ LogicalResult cir::ComplexImagPtrOp::verify() {
// LoadOp
//===----------------------------------------------------------------------===//

// TODO(CIR): The final interface here should include an argument for the
// SyncScope::ID.
void cir::LoadOp::setAtomic(cir::MemOrder order) {
void cir::LoadOp::setAtomic(cir::MemOrder order, cir::SyncScopeKind scope) {
setMemOrder(order);
if (cir::MissingFeatures::syncScopeID())
Review comment (Member): Now that syncScopeID is implemented, you should also delete syncScopeID from the MissingFeatures header entry.
llvm_unreachable("NYI");
setSyncScope(scope);
}

//===----------------------------------------------------------------------===//
@@ -386,7 +386,9 @@ void ItaniumCXXABI::lowerGetMethod(
rewriter, op.getLoc(), vtablePtrPtr, /*isDeref=*/false,
/*isVolatile=*/false,
/*isNontemporal=*/false,
/*alignment=*/mlir::IntegerAttr(), /*mem_order=*/cir::MemOrderAttr(),
/*alignment=*/mlir::IntegerAttr(),
/*sync_scope=*/cir::SyncScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr(),
/*tbaa=*/mlir::ArrayAttr());

// Get the vtable offset.
@@ -421,6 +423,7 @@
/*isDeref=*/false, /*isVolatile=*/false,
/*isNontemporal=*/false,
/*alignment=*/mlir::IntegerAttr(),
/*sync_scope=*/cir::SyncScopeKindAttr{},
/*mem_order=*/cir::MemOrderAttr(),
/*tbaa=*/mlir::ArrayAttr());
}
21 changes: 12 additions & 9 deletions clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -423,10 +423,11 @@ static mlir::Value emitToMemory(mlir::ConversionPatternRewriter &rewriter,
}

std::optional<llvm::StringRef>
getLLVMSyncScope(std::optional<cir::MemScopeKind> syncScope) {
getLLVMSyncScope(std::optional<cir::SyncScopeKind> syncScope) {
if (syncScope.has_value())
return syncScope.value() == cir::MemScopeKind::SingleThread ? "singlethread"
: "";
return syncScope.value() == cir::SyncScopeKind::SingleThread
? "singlethread"
: "";
return std::nullopt;
}
} // namespace
@@ -1829,11 +1830,13 @@ mlir::LogicalResult CIRToLLVMLoadOpLowering::matchAndRewrite(
invariant = isLoadOrStoreInvariant(op.getAddr());

// TODO: nontemporal, syncscope.
auto syncScope = getLLVMSyncScope(op.getSyncScope());
auto newLoad = mlir::LLVM::LoadOp::create(
rewriter, op->getLoc(), llvmTy, adaptor.getAddr(),
/* alignment */ alignment, op.getIsVolatile(),
/* nontemporal */ op.getIsNontemporal(),
/* invariant */ false, /* invariantGroup */ invariant, ordering);
/*alignment*/ alignment, op.getIsVolatile(),
/*nontemporal*/ op.getIsNontemporal(),
/*invariant*/ false, /*invariantGroup*/ invariant, ordering,
/*syncscope=*/syncScope.value_or(StringRef()));

// Convert adapted result to its original type if needed.
mlir::Value result =
@@ -3499,7 +3502,7 @@ mlir::LogicalResult CIRToLLVMAtomicCmpXchgLowering::matchAndRewrite(
rewriter, op.getLoc(), adaptor.getPtr(), expected, desired,
getLLVMAtomicOrder(adaptor.getSuccOrder()),
getLLVMAtomicOrder(adaptor.getFailOrder()));
cmpxchg.setSyncscope(getLLVMSyncScope(adaptor.getSyncscope()));
cmpxchg.setSyncscope(getLLVMSyncScope(adaptor.getSyncScope()));
cmpxchg.setAlignment(adaptor.getAlignment());
cmpxchg.setWeak(adaptor.getWeak());
cmpxchg.setVolatile_(adaptor.getIsVolatile());
@@ -3666,7 +3669,7 @@ mlir::LogicalResult CIRToLLVMAtomicTestAndSetOpLowering::matchAndRewrite(
mlir::ConversionPatternRewriter &rewriter) const {
mlir::LLVM::AtomicOrdering llvmOrder = getLLVMAtomicOrder(op.getMemOrder());
llvm::StringRef llvmSyncScope =
getLLVMSyncScope(adaptor.getSyncscope()).value_or(StringRef());
getLLVMSyncScope(adaptor.getSyncScope()).value_or(StringRef());

auto one = mlir::LLVM::ConstantOp::create(rewriter, op.getLoc(),
rewriter.getI8Type(), 1);
@@ -3702,7 +3705,7 @@ mlir::LogicalResult CIRToLLVMAtomicFenceLowering::matchAndRewrite(
auto llvmOrder = getLLVMAtomicOrder(adaptor.getOrdering());

auto fence = mlir::LLVM::FenceOp::create(rewriter, op.getLoc(), llvmOrder);
fence.setSyncscope(getLLVMSyncScope(adaptor.getSyncscope()));
fence.setSyncscope(getLLVMSyncScope(adaptor.getSyncScope()));

rewriter.replaceOp(op, fence);

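At the LLVM level the scope is just a string: `SingleThread` lowers to LLVM's `"singlethread"` scope, `System` to the empty string (LLVM's default system scope), and a missing attribute leaves the instruction with no syncscope at all. A small sketch of that mapping (an illustration of the behavior of `getLLVMSyncScope` above, not code from the patch):

```cpp
// Illustration only: the scope-string mapping the lowering relies on.
#include <optional>
#include "llvm/ADT/StringRef.h"
#include "clang/CIR/Dialect/IR/CIROpsEnums.h"

static std::optional<llvm::StringRef>
scopeStringFor(std::optional<cir::SyncScopeKind> scope) {
  if (!scope)
    return std::nullopt;                       // no syncscope on the CIR op
  return *scope == cir::SyncScopeKind::SingleThread
             ? llvm::StringRef("singlethread") // thread-local ordering only
             : llvm::StringRef("");            // default (system) scope
}
```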