author     Konstantin Zhuravlyov <kzhuravl_dev@outlook.com>  2017-07-11 22:23:00 +0000
committer  Konstantin Zhuravlyov <kzhuravl_dev@outlook.com>  2017-07-11 22:23:00 +0000
commit     8f85685860c0f6018a83b804f33e47f8122a9eba (patch)
tree       70998f02d9f575729c633331fa2b8d3fcfaf3c33 /lib
parent     fdda7ea9d5f2dea87392cde2577c6ea6fd142433 (diff)
Enhance synchscope representation
OpenCL 2.0 introduces the notion of memory scopes in atomic operations to
global and local memory. These scopes restrict how synchronization is
achieved, which can result in improved performance.

This change extends the existing notion of synchronization scopes in LLVM to
support arbitrary scopes expressed as target-specific strings, in addition to
the already defined scopes (single thread, system).

The LLVM IR and MIR syntax for expressing synchronization scopes has changed
to use *syncscope("<scope>")*, where <scope> can be "singlethread" (this
replaces the *singlethread* keyword) or a target-specific name. As before, if
the scope is not specified, it defaults to the CrossThread/System scope.

Implementation details:
  - Mapping from synchronization scope name/string to synchronization scope
    id is stored in the LLVM context;
  - CrossThread/System and SingleThread scopes are pre-defined to efficiently
    check for known scopes without comparing strings;
  - Synchronization scope names are stored in SYNC_SCOPE_NAMES_BLOCK in the
    bitcode.

Differential Revision: https://reviews.llvm.org/D21723

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@307722 91177308-0d34-0410-b5e6-96231b3b80d8
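For illustration, the new IR syntax reads as follows. This is a minimal
sketch; "agent" stands in for an arbitrary target-specific scope name and is
not something this change defines:

    fence syncscope("singlethread") seq_cst                           ; formerly: fence singlethread seq_cst
    %old = atomicrmw add i32* %p, i32 1 syncscope("agent") monotonic  ; target-specific scope
    %v = load atomic i32, i32* %p seq_cst, align 4                    ; no syncscope: defaults to system scope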
Diffstat (limited to 'lib')
-rw-r--r--  lib/AsmParser/LLLexer.cpp  2
-rw-r--r--  lib/AsmParser/LLParser.cpp  64
-rw-r--r--  lib/AsmParser/LLParser.h  3
-rw-r--r--  lib/AsmParser/LLToken.h  2
-rw-r--r--  lib/Bitcode/Reader/BitcodeReader.cpp  92
-rw-r--r--  lib/Bitcode/Writer/BitcodeWriter.cpp  43
-rw-r--r--  lib/CodeGen/AtomicExpandPass.cpp  8
-rw-r--r--  lib/CodeGen/GlobalISel/IRTranslator.cpp  4
-rw-r--r--  lib/CodeGen/MIRParser/MILexer.cpp  10
-rw-r--r--  lib/CodeGen/MIRParser/MILexer.h  3
-rw-r--r--  lib/CodeGen/MIRParser/MIParser.cpp  45
-rw-r--r--  lib/CodeGen/MIRPrinter.cpp  31
-rw-r--r--  lib/CodeGen/MachineFunction.cpp  12
-rw-r--r--  lib/CodeGen/MachineInstr.cpp  6
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAG.cpp  8
-rw-r--r--  lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp  18
-rw-r--r--  lib/IR/AsmWriter.cpp  66
-rw-r--r--  lib/IR/Core.cpp  21
-rw-r--r--  lib/IR/Instruction.cpp  11
-rw-r--r--  lib/IR/Instructions.cpp  68
-rw-r--r--  lib/IR/LLVMContext.cpp  18
-rw-r--r--  lib/IR/LLVMContextImpl.cpp  14
-rw-r--r--  lib/IR/LLVMContextImpl.h  14
-rw-r--r--  lib/IR/Verifier.cpp  4
-rw-r--r--  lib/Target/ARM/ARMISelLowering.cpp  6
-rw-r--r--  lib/Target/SystemZ/SystemZISelLowering.cpp  4
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp  10
-rw-r--r--  lib/Transforms/IPO/GlobalOpt.cpp  10
-rw-r--r--  lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp  10
-rw-r--r--  lib/Transforms/Instrumentation/ThreadSanitizer.cpp  7
-rw-r--r--  lib/Transforms/Scalar/GVN.cpp  2
-rw-r--r--  lib/Transforms/Scalar/JumpThreading.cpp  2
-rw-r--r--  lib/Transforms/Scalar/SROA.cpp  6
-rw-r--r--  lib/Transforms/Utils/FunctionComparator.cpp  18
34 files changed, 433 insertions, 209 deletions
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index a49276099f1..428bb21fbf5 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -542,7 +542,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(release);
KEYWORD(acq_rel);
KEYWORD(seq_cst);
- KEYWORD(singlethread);
+ KEYWORD(syncscope);
KEYWORD(nnan);
KEYWORD(ninf);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index 7817449dac7..717eb0e00f4 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1919,20 +1919,42 @@ bool LLParser::parseAllocSizeArguments(unsigned &BaseSizeArg,
}
/// ParseScopeAndOrdering
-/// if isAtomic: ::= 'singlethread'? AtomicOrdering
+/// if isAtomic: ::= SyncScope? AtomicOrdering
/// else: ::=
///
/// This sets Scope and Ordering to the parsed values.
-bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+bool LLParser::ParseScopeAndOrdering(bool isAtomic, SyncScope::ID &SSID,
AtomicOrdering &Ordering) {
if (!isAtomic)
return false;
- Scope = CrossThread;
- if (EatIfPresent(lltok::kw_singlethread))
- Scope = SingleThread;
+ return ParseScope(SSID) || ParseOrdering(Ordering);
+}
+
+/// ParseScope
+/// ::= syncscope("singlethread" | "<target scope>")?
+///
+/// This sets synchronization scope ID to the ID of the parsed value.
+bool LLParser::ParseScope(SyncScope::ID &SSID) {
+ SSID = SyncScope::System;
+ if (EatIfPresent(lltok::kw_syncscope)) {
+ auto StartParenAt = Lex.getLoc();
+ if (!EatIfPresent(lltok::lparen))
+ return Error(StartParenAt, "Expected '(' in syncscope");
+
+ std::string SSN;
+ auto SSNAt = Lex.getLoc();
+ if (ParseStringConstant(SSN))
+ return Error(SSNAt, "Expected synchronization scope name");
- return ParseOrdering(Ordering);
+ auto EndParenAt = Lex.getLoc();
+ if (!EatIfPresent(lltok::rparen))
+ return Error(EndParenAt, "Expected ')' in syncscope");
+
+ SSID = Context.getOrInsertSyncScopeID(SSN);
+ }
+
+ return false;
}
/// ParseOrdering
@@ -6100,7 +6122,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
bool isAtomic = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
if (Lex.getKind() == lltok::kw_atomic) {
isAtomic = true;
@@ -6118,7 +6140,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseType(Ty) ||
ParseToken(lltok::comma, "expected comma after load's type") ||
ParseTypeAndValue(Val, Loc, PFS) ||
- ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+ ParseScopeAndOrdering(isAtomic, SSID, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -6134,7 +6156,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
return Error(ExplicitTypeLoc,
"explicit pointee type doesn't match operand's pointee type");
- Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, Scope);
+ Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}
@@ -6149,7 +6171,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
bool isAtomic = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
if (Lex.getKind() == lltok::kw_atomic) {
isAtomic = true;
@@ -6165,7 +6187,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseTypeAndValue(Val, Loc, PFS) ||
ParseToken(lltok::comma, "expected ',' after store operand") ||
ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
- ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+ ParseScopeAndOrdering(isAtomic, SSID, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -6181,7 +6203,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
Ordering == AtomicOrdering::AcquireRelease)
return Error(Loc, "atomic store cannot use Acquire ordering");
- Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
+ Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, SSID);
return AteExtraComma ? InstExtraComma : InstNormal;
}
@@ -6193,7 +6215,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
bool AteExtraComma = false;
AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
bool isWeak = false;
@@ -6208,7 +6230,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
ParseTypeAndValue(New, NewLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, SuccessOrdering) ||
+ ParseScopeAndOrdering(true /*Always atomic*/, SSID, SuccessOrdering) ||
ParseOrdering(FailureOrdering))
return true;
@@ -6231,7 +6253,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
if (!New->getType()->isFirstClassType())
return Error(NewLoc, "cmpxchg operand must be a first class value");
AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
- Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
+ Ptr, Cmp, New, SuccessOrdering, FailureOrdering, SSID);
CXI->setVolatile(isVolatile);
CXI->setWeak(isWeak);
Inst = CXI;
@@ -6245,7 +6267,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
bool AteExtraComma = false;
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
+ SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
AtomicRMWInst::BinOp Operation;
@@ -6271,7 +6293,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
ParseToken(lltok::comma, "expected ',' after atomicrmw address") ||
ParseTypeAndValue(Val, ValLoc, PFS) ||
- ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ ParseScopeAndOrdering(true /*Always atomic*/, SSID, Ordering))
return true;
if (Ordering == AtomicOrdering::Unordered)
@@ -6288,7 +6310,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
" integer");
AtomicRMWInst *RMWI =
- new AtomicRMWInst(Operation, Ptr, Val, Ordering, Scope);
+ new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
RMWI->setVolatile(isVolatile);
Inst = RMWI;
return AteExtraComma ? InstExtraComma : InstNormal;
@@ -6298,8 +6320,8 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
/// ::= 'fence' 'singlethread'? AtomicOrdering
int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
- SynchronizationScope Scope = CrossThread;
- if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+ SyncScope::ID SSID = SyncScope::System;
+ if (ParseScopeAndOrdering(true /*Always atomic*/, SSID, Ordering))
return true;
if (Ordering == AtomicOrdering::Unordered)
@@ -6307,7 +6329,7 @@ int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
if (Ordering == AtomicOrdering::Monotonic)
return TokError("fence cannot be monotonic");
- Inst = new FenceInst(Context, Ordering, Scope);
+ Inst = new FenceInst(Context, Ordering, SSID);
return InstNormal;
}
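
From C++, a front end registers a scope name once and then passes the
resulting ID to the atomic instruction constructors changed in this patch. A
minimal sketch, assuming the IRBuilder and LLVMContext declarations that
accompany this patch outside lib/ (the diffstat above covers lib/ only), with
"agent" again an illustrative target-specific name:

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    void emitScopedFence(LLVMContext &Ctx, BasicBlock *BB) {
      // Map a name to a SyncScope::ID that is stable for this context.
      SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
      IRBuilder<> Builder(BB);
      // Round-trips through LLParser/AsmWriter as:
      //   fence syncscope("agent") seq_cst
      Builder.CreateFence(AtomicOrdering::SequentiallyConsistent, AgentSSID);
    }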
diff --git a/lib/AsmParser/LLParser.h b/lib/AsmParser/LLParser.h
index 4616c2e8694..d5b059355c4 100644
--- a/lib/AsmParser/LLParser.h
+++ b/lib/AsmParser/LLParser.h
@@ -241,8 +241,9 @@ namespace llvm {
bool ParseOptionalCallingConv(unsigned &CC);
bool ParseOptionalAlignment(unsigned &Alignment);
bool ParseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
- bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+ bool ParseScopeAndOrdering(bool isAtomic, SyncScope::ID &SSID,
AtomicOrdering &Ordering);
+ bool ParseScope(SyncScope::ID &SSID);
bool ParseOrdering(AtomicOrdering &Ordering);
bool ParseOptionalStackAlignment(unsigned &Alignment);
bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index 6c8ed7da495..9c7a06de81b 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -93,7 +93,7 @@ enum Kind {
kw_release,
kw_acq_rel,
kw_seq_cst,
- kw_singlethread,
+ kw_syncscope,
kw_nnan,
kw_ninf,
kw_nsz,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 1ebef317313..2b4970a80cd 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -513,6 +513,7 @@ class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
TBAAVerifier TBAAVerifyHelper;
std::vector<std::string> BundleTags;
+ SmallVector<SyncScope::ID, 8> SSIDs;
public:
BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
@@ -648,6 +649,7 @@ private:
Error parseTypeTable();
Error parseTypeTableBody();
Error parseOperandBundleTags();
+ Error parseSyncScopeNames();
Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
unsigned NameIndex, Triple &TT);
@@ -668,6 +670,8 @@ private:
Error findFunctionInStream(
Function *F,
DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
+
+ SyncScope::ID getDecodedSyncScopeID(unsigned Val);
};
/// Class to manage reading and parsing function summary index bitcode
@@ -998,14 +1002,6 @@ static AtomicOrdering getDecodedOrdering(unsigned Val) {
}
}
-static SynchronizationScope getDecodedSynchScope(unsigned Val) {
- switch (Val) {
- case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread;
- default: // Map unknown scopes to cross-thread.
- case bitc::SYNCHSCOPE_CROSSTHREAD: return CrossThread;
- }
-}
-
static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
switch (Val) {
default: // Map unknown selection kinds to any.
@@ -1745,6 +1741,44 @@ Error BitcodeReader::parseOperandBundleTags() {
}
}
+Error BitcodeReader::parseSyncScopeNames() {
+ if (Stream.EnterSubBlock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID))
+ return error("Invalid record");
+
+ if (!SSIDs.empty())
+ return error("Invalid multiple synchronization scope names blocks");
+
+ SmallVector<uint64_t, 64> Record;
+ while (true) {
+ BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ if (SSIDs.empty())
+ return error("Invalid empty synchronization scope names block");
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Synchronization scope names are implicitly mapped to synchronization
+ // scope IDs by their order.
+
+ if (Stream.readRecord(Entry.ID, Record) != bitc::SYNC_SCOPE_NAME)
+ return error("Invalid record");
+
+ SmallString<16> SSN;
+ if (convertToString(Record, 0, SSN))
+ return error("Invalid record");
+
+ SSIDs.push_back(Context.getOrInsertSyncScopeID(SSN));
+ Record.clear();
+ }
+}
+
/// Associate a value with its name from the given index in the provided record.
Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record,
unsigned NameIndex, Triple &TT) {
@@ -3132,6 +3166,10 @@ Error BitcodeReader::parseModule(uint64_t ResumeBit,
if (Error Err = parseOperandBundleTags())
return Err;
break;
+ case bitc::SYNC_SCOPE_NAMES_BLOCK_ID:
+ if (Error Err = parseSyncScopeNames())
+ return Err;
+ break;
}
continue;
@@ -4204,7 +4242,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_LOADATOMIC: {
- // LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
+ // LOADATOMIC: [opty, op, align, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Op;
if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
@@ -4226,12 +4264,12 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
return error("Invalid record");
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
unsigned Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
- I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SynchScope);
+ I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SSID);
InstructionList.push_back(I);
break;
@@ -4260,7 +4298,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
}
case bitc::FUNC_CODE_INST_STOREATOMIC:
case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: {
- // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
+ // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Val, *Ptr;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -4280,20 +4318,20 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
Ordering == AtomicOrdering::Acquire ||
Ordering == AtomicOrdering::AcquireRelease)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
return error("Invalid record");
unsigned Align;
if (Error Err = parseAlignmentValue(Record[OpNum], Align))
return Err;
- I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SynchScope);
+ I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SSID);
InstructionList.push_back(I);
break;
}
case bitc::FUNC_CODE_INST_CMPXCHG_OLD:
case bitc::FUNC_CODE_INST_CMPXCHG: {
- // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope,
+ // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, ssid,
// failureordering?, isweak?]
unsigned OpNum = 0;
Value *Ptr, *Cmp, *New;
@@ -4310,7 +4348,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (SuccessOrdering == AtomicOrdering::NotAtomic ||
SuccessOrdering == AtomicOrdering::Unordered)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]);
if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType()))
return Err;
@@ -4322,7 +4360,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
FailureOrdering = getDecodedOrdering(Record[OpNum + 3]);
I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
- SynchScope);
+ SSID);
cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
if (Record.size() < 8) {
@@ -4339,7 +4377,7 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
break;
}
case bitc::FUNC_CODE_INST_ATOMICRMW: {
- // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, synchscope]
+ // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, ssid]
unsigned OpNum = 0;
Value *Ptr, *Val;
if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
@@ -4356,13 +4394,13 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
if (Ordering == AtomicOrdering::NotAtomic ||
Ordering == AtomicOrdering::Unordered)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
- I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
+ I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SSID);
cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
InstructionList.push_back(I);
break;
}
- case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
+ case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, ssid]
if (2 != Record.size())
return error("Invalid record");
AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
@@ -4370,8 +4408,8 @@ Error BitcodeReader::parseFunctionBody(Function *F) {
Ordering == AtomicOrdering::Unordered ||
Ordering == AtomicOrdering::Monotonic)
return error("Invalid record");
- SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
- I = new FenceInst(Context, Ordering, SynchScope);
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[1]);
+ I = new FenceInst(Context, Ordering, SSID);
InstructionList.push_back(I);
break;
}
@@ -4567,6 +4605,14 @@ Error BitcodeReader::findFunctionInStream(
return Error::success();
}
+SyncScope::ID BitcodeReader::getDecodedSyncScopeID(unsigned Val) {
+ if (Val == SyncScope::SingleThread || Val == SyncScope::System)
+ return SyncScope::ID(Val);
+ if (Val >= SSIDs.size())
+ return SyncScope::System; // Map unknown synchronization scopes to system.
+ return SSIDs[Val];
+}
+
//===----------------------------------------------------------------------===//
// GVMaterializer implementation
//===----------------------------------------------------------------------===//
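
The mapping between names and encoded IDs is purely positional: the writer
(below) emits names ordered by increasing SyncScope::ID, and the reader
recovers an ID from a name's position in SYNC_SCOPE_NAMES_BLOCK. A compressed
sketch of the decoding rule implemented by getDecodedSyncScopeID above,
assuming BlockNames holds the block's entries in order:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/IR/LLVMContext.h"
    using namespace llvm;

    SyncScope::ID decodeScope(LLVMContext &Ctx, ArrayRef<StringRef> BlockNames,
                              unsigned Val) {
      if (Val == SyncScope::SingleThread || Val == SyncScope::System)
        return SyncScope::ID(Val);  // pre-defined IDs round-trip unchanged
      if (Val >= BlockNames.size())
        return SyncScope::System;   // unknown scopes degrade to system scope
      return Ctx.getOrInsertSyncScopeID(BlockNames[Val]); // re-register by name
    }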
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index 862d43381f3..0e518d2bbc8 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -266,6 +266,7 @@ private:
const GlobalObject &GO);
void writeModuleMetadataKinds();
void writeOperandBundleTags();
+ void writeSyncScopeNames();
void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal);
void writeModuleConstants();
bool pushValueAndType(const Value *V, unsigned InstID,
@@ -316,6 +317,10 @@ private:
return VE.getValueID(VI.getValue());
}
std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+
+ unsigned getEncodedSyncScopeID(SyncScope::ID SSID) {
+ return unsigned(SSID);
+ }
};
/// Class to manage the bitcode writing for a combined index.
@@ -485,14 +490,6 @@ static unsigned getEncodedOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid ordering");
}
-static unsigned getEncodedSynchScope(SynchronizationScope SynchScope) {
- switch (SynchScope) {
- case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD;
- case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD;
- }
- llvm_unreachable("Invalid synch scope");
-}
-
static void writeStringRecord(BitstreamWriter &Stream, unsigned Code,
StringRef Str, unsigned AbbrevToUse) {
SmallVector<unsigned, 64> Vals;
@@ -2042,6 +2039,24 @@ void ModuleBitcodeWriter::writeOperandBundleTags() {
Stream.ExitBlock();
}
+void ModuleBitcodeWriter::writeSyncScopeNames() {
+ SmallVector<StringRef, 8> SSNs;
+ M.getContext().getSyncScopeNames(SSNs);
+ if (SSNs.empty())
+ return;
+
+ Stream.EnterSubblock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID, 2);
+
+ SmallVector<uint64_t, 64> Record;
+ for (auto SSN : SSNs) {
+ Record.append(SSN.begin(), SSN.end());
+ Stream.EmitRecord(bitc::SYNC_SCOPE_NAME, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
if ((int64_t)V >= 0)
Vals.push_back(V << 1);
@@ -2658,7 +2673,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<LoadInst>(I).isVolatile());
if (cast<LoadInst>(I).isAtomic()) {
Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
+ Vals.push_back(getEncodedSyncScopeID(cast<LoadInst>(I).getSyncScopeID()));
}
break;
case Instruction::Store:
@@ -2672,7 +2687,8 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<StoreInst>(I).isVolatile());
if (cast<StoreInst>(I).isAtomic()) {
Vals.push_back(getEncodedOrdering(cast<StoreInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
+ Vals.push_back(
+ getEncodedSyncScopeID(cast<StoreInst>(I).getSyncScopeID()));
}
break;
case Instruction::AtomicCmpXchg:
@@ -2684,7 +2700,7 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(
getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
Vals.push_back(
- getEncodedSynchScope(cast<AtomicCmpXchgInst>(I).getSynchScope()));
+ getEncodedSyncScopeID(cast<AtomicCmpXchgInst>(I).getSyncScopeID()));
Vals.push_back(
getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
@@ -2698,12 +2714,12 @@ void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
Vals.push_back(getEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
Vals.push_back(
- getEncodedSynchScope(cast<AtomicRMWInst>(I).getSynchScope()));
+ getEncodedSyncScopeID(cast<AtomicRMWInst>(I).getSyncScopeID()));
break;
case Instruction::Fence:
Code = bitc::FUNC_CODE_INST_FENCE;
Vals.push_back(getEncodedOrdering(cast<FenceInst>(I).getOrdering()));
- Vals.push_back(getEncodedSynchScope(cast<FenceInst>(I).getSynchScope()));
+ Vals.push_back(getEncodedSyncScopeID(cast<FenceInst>(I).getSyncScopeID()));
break;
case Instruction::Call: {
const CallInst &CI = cast<CallInst>(I);
@@ -3716,6 +3732,7 @@ void ModuleBitcodeWriter::write() {
writeUseListBlock(nullptr);
writeOperandBundleTags();
+ writeSyncScopeNames();
// Emit function bodies.
DenseMap<const Function *, uint64_t> FunctionToBitcodeIndex;
diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp
index 344136b1f19..aa9c8e94d08 100644
--- a/lib/CodeGen/AtomicExpandPass.cpp
+++ b/lib/CodeGen/AtomicExpandPass.cpp
@@ -361,7 +361,7 @@ LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *NewLI = Builder.CreateLoad(NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
- NewLI->setAtomic(LI->getOrdering(), LI->getSynchScope());
+ NewLI->setAtomic(LI->getOrdering(), LI->getSyncScopeID());
DEBUG(dbgs() << "Replaced " << *LI << " with " << *NewLI << "\n");
Value *NewVal = Builder.CreateBitCast(NewLI, LI->getType());
@@ -444,7 +444,7 @@ StoreInst *AtomicExpand::convertAtomicStoreToIntegerType(StoreInst *SI) {
StoreInst *NewSI = Builder.CreateStore(NewVal, NewAddr);
NewSI->setAlignment(SI->getAlignment());
NewSI->setVolatile(SI->isVolatile());
- NewSI->setAtomic(SI->getOrdering(), SI->getSynchScope());
+ NewSI->setAtomic(SI->getOrdering(), SI->getSyncScopeID());
DEBUG(dbgs() << "Replaced " << *SI << " with " << *NewSI << "\n");
SI->eraseFromParent();
return NewSI;
@@ -801,7 +801,7 @@ void AtomicExpand::expandPartwordCmpXchg(AtomicCmpXchgInst *CI) {
Value *FullWord_Cmp = Builder.CreateOr(Loaded_MaskOut, Cmp_Shifted);
AtomicCmpXchgInst *NewCI = Builder.CreateAtomicCmpXchg(
PMV.AlignedAddr, FullWord_Cmp, FullWord_NewVal, CI->getSuccessOrdering(),
- CI->getFailureOrdering(), CI->getSynchScope());
+ CI->getFailureOrdering(), CI->getSyncScopeID());
NewCI->setVolatile(CI->isVolatile());
// When we're building a strong cmpxchg, we need a loop, so you
// might think we could use a weak cmpxchg inside. But, using strong
@@ -924,7 +924,7 @@ AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *
auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
CI->getSuccessOrdering(),
CI->getFailureOrdering(),
- CI->getSynchScope());
+ CI->getSyncScopeID());
NewCI->setVolatile(CI->isVolatile());
NewCI->setWeak(CI->isWeak());
DEBUG(dbgs() << "Replaced " << *CI << " with " << *NewCI << "\n");
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 521037f9d20..ed1bd995e60 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -345,7 +345,7 @@ bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
*MF->getMachineMemOperand(MachinePointerInfo(LI.getPointerOperand()),
Flags, DL->getTypeStoreSize(LI.getType()),
getMemOpAlignment(LI), AAMDNodes(), nullptr,
- LI.getSynchScope(), LI.getOrdering()));
+ LI.getSyncScopeID(), LI.getOrdering()));
return true;
}
@@ -363,7 +363,7 @@ bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
*MF->getMachineMemOperand(
MachinePointerInfo(SI.getPointerOperand()), Flags,
DL->getTypeStoreSize(SI.getValueOperand()->getType()),
- getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSynchScope(),
+ getMemOpAlignment(SI), AAMDNodes(), nullptr, SI.getSyncScopeID(),
SI.getOrdering()));
return true;
}
diff --git a/lib/CodeGen/MIRParser/MILexer.cpp b/lib/CodeGen/MIRParser/MILexer.cpp
index 1f1ce6e8d72..58a655a4dee 100644
--- a/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/lib/CodeGen/MIRParser/MILexer.cpp
@@ -365,6 +365,14 @@ static Cursor maybeLexIRValue(Cursor C, MIToken &Token,
return lexName(C, Token, MIToken::NamedIRValue, Rule.size(), ErrorCallback);
}
+static Cursor maybeLexStringConstant(Cursor C, MIToken &Token,
+ ErrorCallbackType ErrorCallback) {
+ if (C.peek() != '"')
+ return None;
+ return lexName(C, Token, MIToken::StringConstant, /*PrefixLength=*/0,
+ ErrorCallback);
+}
+
static Cursor lexVirtualRegister(Cursor C, MIToken &Token) {
auto Range = C;
C.advance(); // Skip '%'
@@ -630,6 +638,8 @@ StringRef llvm::lexMIToken(StringRef Source, MIToken &Token,
return R.remaining();
if (Cursor R = maybeLexEscapedIRValue(C, Token, ErrorCallback))
return R.remaining();
+ if (Cursor R = maybeLexStringConstant(C, Token, ErrorCallback))
+ return R.remaining();
Token.reset(MIToken::Error, C.remaining());
ErrorCallback(C.location(),
diff --git a/lib/CodeGen/MIRParser/MILexer.h b/lib/CodeGen/MIRParser/MILexer.h
index 3e9513111bf..ed41e07e3c1 100644
--- a/lib/CodeGen/MIRParser/MILexer.h
+++ b/lib/CodeGen/MIRParser/MILexer.h
@@ -127,7 +127,8 @@ struct MIToken {
NamedIRValue,
IRValue,
QuotedIRValue, // `<constant value>`
- SubRegisterIndex
+ SubRegisterIndex,
+ StringConstant
};
private:
diff --git a/lib/CodeGen/MIRParser/MIParser.cpp b/lib/CodeGen/MIRParser/MIParser.cpp
index c58d192284d..70dca277733 100644
--- a/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/lib/CodeGen/MIRParser/MIParser.cpp
@@ -229,6 +229,7 @@ public:
bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
bool parseMemoryPseudoSourceValue(const PseudoSourceValue *&PSV);
bool parseMachinePointerInfo(MachinePointerInfo &Dest);
+ bool parseOptionalScope(LLVMContext &Context, SyncScope::ID &SSID);
bool parseOptionalAtomicOrdering(AtomicOrdering &Order);
bool parseMachineMemoryOperand(MachineMemOperand *&Dest);
@@ -318,6 +319,10 @@ private:
///
/// Return true if the name isn't a name of a bitmask target flag.
bool getBitmaskTargetFlag(StringRef Name, unsigned &Flag);
+
+ /// parseStringConstant
+ /// ::= StringConstant
+ bool parseStringConstant(std::string &Result);
};
} // end anonymous namespace
@@ -2135,6 +2140,26 @@ bool MIParser::parseMachinePointerInfo(MachinePointerInfo &Dest) {
return false;
}
+bool MIParser::parseOptionalScope(LLVMContext &Context,
+ SyncScope::ID &SSID) {
+ SSID = SyncScope::System;
+ if (Token.is(MIToken::Identifier) && Token.stringValue() == "syncscope") {
+ lex();
+ if (expectAndConsume(MIToken::lparen))
+ return error("expected '(' in syncscope");
+
+ std::string SSN;
+ if (parseStringConstant(SSN))
+ return true;
+
+ SSID = Context.getOrInsertSyncScopeID(SSN);
+ if (expectAndConsume(MIToken::rparen))
+ return error("expected ')' in syncscope");
+ }
+
+ return false;
+}
+
bool MIParser::parseOptionalAtomicOrdering(AtomicOrdering &Order) {
Order = AtomicOrdering::NotAtomic;
if (Token.isNot(MIToken::Identifier))
@@ -2174,12 +2199,10 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
Flags |= MachineMemOperand::MOStore;
lex();
- // Optional "singlethread" scope.
- SynchronizationScope Scope = SynchronizationScope::CrossThread;
- if (Token.is(MIToken::Identifier) && Token.stringValue() == "singlethread") {
- Scope = SynchronizationScope::SingleThread;
- lex();
- }
+ // Optional synchronization scope.
+ SyncScope::ID SSID;
+ if (parseOptionalScope(MF.getFunction()->getContext(), SSID))
+ return true;
// Up to two atomic orderings (cmpxchg provides guarantees on failure).
AtomicOrdering Order, FailureOrder;
@@ -2244,7 +2267,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
if (expectAndConsume(MIToken::rparen))
return true;
Dest = MF.getMachineMemOperand(Ptr, Flags, Size, BaseAlignment, AAInfo, Range,
- Scope, Order, FailureOrder);
+ SSID, Order, FailureOrder);
return false;
}
@@ -2457,6 +2480,14 @@ bool MIParser::getBitmaskTargetFlag(StringRef Name, unsigned &Flag) {
return false;
}
+bool MIParser::parseStringConstant(std::string &Result) {
+ if (Token.isNot(MIToken::StringConstant))
+ return error("expected string constant");
+ Result = Token.stringValue();
+ lex();
+ return false;
+}
+
bool llvm::parseMachineBasicBlockDefinitions(PerFunctionMIParsingState &PFS,
StringRef Src,
SMDiagnostic &Error) {
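
In MIR the scope is part of the machine memory operand. A schematic example
of an operand that parseOptionalScope above accepts, and that MIRPrinter
(below) emits, using a made-up instruction and the illustrative scope name
"agent":

    %1 = LOAD %0 :: (load syncscope("agent") acquire 4 from %ir.ptr)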
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index c524a9835f3..4cc61420ab4 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@@ -139,6 +140,8 @@ class MIPrinter {
ModuleSlotTracker &MST;
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
const DenseMap<int, FrameIndexOperand> &StackObjectOperandMapping;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
bool canPredictBranchProbabilities(const MachineBasicBlock &MBB) const;
bool canPredictSuccessors(const MachineBasicBlock &MBB) const;
@@ -162,7 +165,8 @@ public:
void print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
unsigned I, bool ShouldPrintRegisterTies,
LLT TypeToPrint, bool IsDef = false);
- void print(const MachineMemOperand &Op);
+ void print(const LLVMContext &Context, const MachineMemOperand &Op);
+ void printSyncScope(const LLVMContext &Context, SyncScope::ID SSID);
void print(const MCCFIInstruction &CFI, const TargetRegisterInfo *TRI);
};
@@ -731,11 +735,12 @@ void MIPrinter::print(const MachineInstr &MI) {
if (!MI.memoperands_empty()) {
OS << " :: ";
+ const LLVMContext &Context = MF->getFunction()->getContext();
bool NeedComma = false;
for (const auto *Op : MI.memoperands()) {
if (NeedComma)
OS << ", ";
- print(*Op);
+ print(Context, *Op);
NeedComma = true;
}
}
@@ -1031,7 +1036,7 @@ void MIPrinter::print(const MachineOperand &Op, const TargetRegisterInfo *TRI,
}
}
-void MIPrinter::print(const MachineMemOperand &Op) {
+void MIPrinter::print(const LLVMContext &Context, const MachineMemOperand &Op) {
OS << '(';
// TODO: Print operand's target specific flags.
if (Op.isVolatile())
@@ -1049,8 +1054,7 @@ void MIPrinter::print(const MachineMemOperand &Op) {
OS << "store ";
}
- if (Op.getSynchScope() == SynchronizationScope::SingleThread)
- OS << "singlethread ";
+ printSyncScope(Context, Op.getSyncScopeID());
if (Op.getOrdering() != AtomicOrdering::NotAtomic)
OS << toIRString(Op.getOrdering()) << ' ';
@@ -1119,6 +1123,23 @@ void MIPrinter::print(const MachineMemOperand &Op) {
OS << ')';
}
+void MIPrinter::printSyncScope(const LLVMContext &Context, SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
+
+ OS << "syncscope(\"";
+ PrintEscapedString(SSNs[SSID], OS);
+ OS << "\") ";
+ break;
+ }
+ }
+}
+
static void printCFIRegister(unsigned DwarfReg, raw_ostream &OS,
const TargetRegisterInfo *TRI) {
int Reg = TRI->getLLVMRegNum(DwarfReg, true);
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index bbdae6e1a49..f88e175a977 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -305,11 +305,11 @@ MachineFunction::DeleteMachineBasicBlock(MachineBasicBlock *MBB) {
MachineMemOperand *MachineFunction::getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
- SynchronizationScope SynchScope, AtomicOrdering Ordering,
+ SyncScope::ID SSID, AtomicOrdering Ordering,
AtomicOrdering FailureOrdering) {
return new (Allocator)
MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
- SynchScope, Ordering, FailureOrdering);
+ SSID, Ordering, FailureOrdering);
}
MachineMemOperand *
@@ -320,13 +320,13 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand(MachinePointerInfo(MMO->getValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size, MMO->getBaseAlignment(),
- AAMDNodes(), nullptr, MMO->getSynchScope(),
+ AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
return new (Allocator)
MachineMemOperand(MachinePointerInfo(MMO->getPseudoValue(),
MMO->getOffset()+Offset),
MMO->getFlags(), Size, MMO->getBaseAlignment(),
- AAMDNodes(), nullptr, MMO->getSynchScope(),
+ AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
@@ -359,7 +359,7 @@ MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
(*I)->getFlags() & ~MachineMemOperand::MOStore,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo(), nullptr,
- (*I)->getSynchScope(), (*I)->getOrdering(),
+ (*I)->getSyncScopeID(), (*I)->getOrdering(),
(*I)->getFailureOrdering());
Result[Index] = JustLoad;
}
@@ -393,7 +393,7 @@ MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
(*I)->getFlags() & ~MachineMemOperand::MOLoad,
(*I)->getSize(), (*I)->getBaseAlignment(),
(*I)->getAAInfo(), nullptr,
- (*I)->getSynchScope(), (*I)->getOrdering(),
+ (*I)->getSyncScopeID(), (*I)->getOrdering(),
(*I)->getFailureOrdering());
Result[Index] = JustStore;
}
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 7be3b1a4894..ca39db49bdb 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -614,7 +614,7 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
uint64_t s, unsigned int a,
const AAMDNodes &AAInfo,
const MDNode *Ranges,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
AtomicOrdering Ordering,
AtomicOrdering FailureOrdering)
: PtrInfo(ptrinfo), Size(s), FlagVals(f), BaseAlignLog2(Log2_32(a) + 1),
@@ -625,8 +625,8 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
assert(getBaseAlignment() == a && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");
- AtomicInfo.SynchScope = static_cast<unsigned>(SynchScope);
- assert(getSynchScope() == SynchScope && "Value truncated");
+ AtomicInfo.SSID = static_cast<unsigned>(SSID);
+ assert(getSyncScopeID() == SSID && "Value truncated");
AtomicInfo.Ordering = static_cast<unsigned>(Ordering);
assert(getOrdering() == Ordering && "Value truncated");
AtomicInfo.FailureOrdering = static_cast<unsigned>(FailureOrdering);
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index f41323bd08b..823e77850c4 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5443,7 +5443,7 @@ SDValue SelectionDAG::getAtomicCmpSwap(
unsigned Opcode, const SDLoc &dl, EVT MemVT, SDVTList VTs, SDValue Chain,
SDValue Ptr, SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
unsigned Alignment, AtomicOrdering SuccessOrdering,
- AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) {
+ AtomicOrdering FailureOrdering, SyncScope::ID SSID) {
assert(Opcode == ISD::ATOMIC_CMP_SWAP ||
Opcode == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
@@ -5459,7 +5459,7 @@ SDValue SelectionDAG::getAtomicCmpSwap(
MachineMemOperand::MOStore;
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
- AAMDNodes(), nullptr, SynchScope, SuccessOrdering,
+ AAMDNodes(), nullptr, SSID, SuccessOrdering,
FailureOrdering);
return getAtomicCmpSwap(Opcode, dl, MemVT, VTs, Chain, Ptr, Cmp, Swp, MMO);
@@ -5481,7 +5481,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
SDValue Chain, SDValue Ptr, SDValue Val,
const Value *PtrVal, unsigned Alignment,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
@@ -5501,7 +5501,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
MemVT.getStoreSize(), Alignment, AAMDNodes(),
- nullptr, SynchScope, Ordering);
+ nullptr, SSID, Ordering);
return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO);
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index dbec2796ed7..4046b90f3df 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3990,7 +3990,7 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering SuccessOrder = I.getSuccessOrdering();
AtomicOrdering FailureOrder = I.getFailureOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4000,7 +4000,7 @@ void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, dl, MemVT, VTs, InChain,
getValue(I.getPointerOperand()), getValue(I.getCompareOperand()),
getValue(I.getNewValOperand()), MachinePointerInfo(I.getPointerOperand()),
- /*Alignment=*/ 0, SuccessOrder, FailureOrder, Scope);
+ /*Alignment=*/ 0, SuccessOrder, FailureOrder, SSID);
SDValue OutChain = L.getValue(2);
@@ -4026,7 +4026,7 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
}
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4037,7 +4037,7 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValOperand()),
I.getPointerOperand(),
- /* Alignment=*/ 0, Order, Scope);
+ /* Alignment=*/ 0, Order, SSID);
SDValue OutChain = L.getValue(1);
@@ -4052,7 +4052,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
Ops[0] = getRoot();
Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
TLI.getFenceOperandTy(DAG.getDataLayout()));
- Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
+ Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
TLI.getFenceOperandTy(DAG.getDataLayout()));
DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}
@@ -4060,7 +4060,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) {
void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4078,7 +4078,7 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
VT.getStoreSize(),
I.getAlignment() ? I.getAlignment() :
DAG.getEVTAlignment(VT),
- AAMDNodes(), nullptr, Scope, Order);
+ AAMDNodes(), nullptr, SSID, Order);
InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
SDValue L =
@@ -4095,7 +4095,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
SDLoc dl = getCurSDLoc();
AtomicOrdering Order = I.getOrdering();
- SynchronizationScope Scope = I.getSynchScope();
+ SyncScope::ID SSID = I.getSyncScopeID();
SDValue InChain = getRoot();
@@ -4112,7 +4112,7 @@ void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
getValue(I.getPointerOperand()),
getValue(I.getValueOperand()),
I.getPointerOperand(), I.getAlignment(),
- Order, Scope);
+ Order, SSID);
DAG.setRoot(OutChain);
}
diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp
index c7f112887a3..80371780fb6 100644
--- a/lib/IR/AsmWriter.cpp
+++ b/lib/IR/AsmWriter.cpp
@@ -2119,6 +2119,8 @@ class AssemblyWriter {
bool ShouldPreserveUseListOrder;
UseListOrderStack UseListOrders;
SmallVector<StringRef, 8> MDNames;
+ /// Synchronization scope names registered with LLVMContext.
+ SmallVector<StringRef, 8> SSNs;
public:
/// Construct an AssemblyWriter with an external SlotTracker
@@ -2134,10 +2136,15 @@ public:
void writeOperand(const Value *Op, bool PrintType);
void writeParamOperand(const Value *Operand, AttributeSet Attrs);
void writeOperandBundles(ImmutableCallSite CS);
- void writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope);
- void writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+ void writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID);
+ void writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID);
+ void writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope);
+ SyncScope::ID SSID);
void writeAllMDNodes();
void writeMDNode(unsigned Slot, const MDNode *Node);
@@ -2199,30 +2206,42 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) {
WriteAsOperandInternal(Out, Operand, &TypePrinter, &Machine, TheModule);
}
-void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
- if (Ordering == AtomicOrdering::NotAtomic)
- return;
+void AssemblyWriter::writeSyncScope(const LLVMContext &Context,
+ SyncScope::ID SSID) {
+ switch (SSID) {
+ case SyncScope::System: {
+ break;
+ }
+ default: {
+ if (SSNs.empty())
+ Context.getSyncScopeNames(SSNs);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
+ Out << " syncscope(\"";
+ PrintEscapedString(SSNs[SSID], Out);
+ Out << "\")";
+ break;
+ }
}
+}
+
+void AssemblyWriter::writeAtomic(const LLVMContext &Context,
+ AtomicOrdering Ordering,
+ SyncScope::ID SSID) {
+ if (Ordering == AtomicOrdering::NotAtomic)
+ return;
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(Ordering);
}
-void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
+void AssemblyWriter::writeAtomicCmpXchg(const LLVMContext &Context,
+ AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
FailureOrdering != AtomicOrdering::NotAtomic);
- switch (SynchScope) {
- case SingleThread: Out << " singlethread"; break;
- case CrossThread: break;
- }
-
+ writeSyncScope(Context, SSID);
Out << " " << toIRString(SuccessOrdering);
Out << " " << toIRString(FailureOrdering);
}
@@ -3215,21 +3234,22 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
// Print atomic ordering/alignment for memory operations
if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
if (LI->isAtomic())
- writeAtomic(LI->getOrdering(), LI->getSynchScope());
+ writeAtomic(LI->getContext(), LI->getOrdering(), LI->getSyncScopeID());
if (LI->getAlignment())
Out << ", align " << LI->getAlignment();
} else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
if (SI->isAtomic())
- writeAtomic(SI->getOrdering(), SI->getSynchScope());
+ writeAtomic(SI->getContext(), SI->getOrdering(), SI->getSyncScopeID());
if (SI->getAlignment())
Out << ", align " << SI->getAlignment();
} else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
- writeAtomicCmpXchg(CXI->getSuccessOrdering(), CXI->getFailureOrdering(),
- CXI->getSynchScope());
+ writeAtomicCmpXchg(CXI->getContext(), CXI->getSuccessOrdering(),
+ CXI->getFailureOrdering(), CXI->getSyncScopeID());
} else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
- writeAtomic(RMWI->getOrdering(), RMWI->getSynchScope());
+ writeAtomic(RMWI->getContext(), RMWI->getOrdering(),
+ RMWI->getSyncScopeID());
} else if (const FenceInst *FI = dyn_cast<FenceInst>(&I)) {
- writeAtomic(FI->getOrdering(), FI->getSynchScope());
+ writeAtomic(FI->getContext(), FI->getOrdering(), FI->getSyncScopeID());
}
// Print Metadata info.
diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp
index 192a565de0d..2165ae5a947 100644
--- a/lib/IR/Core.cpp
+++ b/lib/IR/Core.cpp
@@ -2756,11 +2756,14 @@ static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
llvm_unreachable("Invalid AtomicOrdering value!");
}
+// TODO: Should this and other atomic instructions support building with
+// "syncscope"?
LLVMValueRef LLVMBuildFence(LLVMBuilderRef B, LLVMAtomicOrdering Ordering,
LLVMBool isSingleThread, const char *Name) {
return wrap(
unwrap(B)->CreateFence(mapFromLLVMOrdering(Ordering),
- isSingleThread ? SingleThread : CrossThread,
+ isSingleThread ? SyncScope::SingleThread
+ : SyncScope::System,
Name));
}
@@ -3042,7 +3045,8 @@ LLVMValueRef LLVMBuildAtomicRMW(LLVMBuilderRef B,LLVMAtomicRMWBinOp op,
case LLVMAtomicRMWBinOpUMin: intop = AtomicRMWInst::UMin; break;
}
return wrap(unwrap(B)->CreateAtomicRMW(intop, unwrap(PTR), unwrap(Val),
- mapFromLLVMOrdering(ordering), singleThread ? SingleThread : CrossThread));
+ mapFromLLVMOrdering(ordering), singleThread ? SyncScope::SingleThread
+ : SyncScope::System));
}
LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
@@ -3054,7 +3058,7 @@ LLVMValueRef LLVMBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Ptr,
return wrap(unwrap(B)->CreateAtomicCmpXchg(unwrap(Ptr), unwrap(Cmp),
unwrap(New), mapFromLLVMOrdering(SuccessOrdering),
mapFromLLVMOrdering(FailureOrdering),
- singleThread ? SingleThread : CrossThread));
+ singleThread ? SyncScope::SingleThread : SyncScope::System));
}
@@ -3062,17 +3066,18 @@ LLVMBool LLVMIsAtomicSingleThread(LLVMValueRef AtomicInst) {
Value *P = unwrap<Value>(AtomicInst);
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->getSynchScope() == SingleThread;
- return cast<AtomicCmpXchgInst>(P)->getSynchScope() == SingleThread;
+ return I->getSyncScopeID() == SyncScope::SingleThread;
+ return cast<AtomicCmpXchgInst>(P)->getSyncScopeID() ==
+ SyncScope::SingleThread;
}
void LLVMSetAtomicSingleThread(LLVMValueRef AtomicInst, LLVMBool NewValue) {
Value *P = unwrap<Value>(AtomicInst);
- SynchronizationScope Sync = NewValue ? SingleThread : CrossThread;
+ SyncScope::ID SSID = NewValue ? SyncScope::SingleThread : SyncScope::System;
if (AtomicRMWInst *I = dyn_cast<AtomicRMWInst>(P))
- return I->setSynchScope(Sync);
- return cast<AtomicCmpXchgInst>(P)->setSynchScope(Sync);
+ return I->setSyncScopeID(SSID);
+ return cast<AtomicCmpXchgInst>(P)->setSyncScopeID(SSID);
}
LLVMAtomicOrdering LLVMGetCmpXchgSuccessOrdering(LLVMValueRef CmpXchgInst) {
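
As the TODO above notes, the C API continues to model the scope as a boolean,
so C clients can only select between the two pre-defined scopes. A minimal
sketch of that restriction from a client's perspective:

    #include "llvm-c/Core.h"

    void buildFences(LLVMBuilderRef B) {
      // true maps to syncscope("singlethread"), false to the system default.
      LLVMBuildFence(B, LLVMAtomicOrderingSequentiallyConsistent,
                     /*isSingleThread=*/1, "");  // fence syncscope("singlethread") seq_cst
      LLVMBuildFence(B, LLVMAtomicOrderingSequentiallyConsistent,
                     /*isSingleThread=*/0, "");  // fence seq_cst
    }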
diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp
index 3dd653d2d04..365cb019aec 100644
--- a/lib/IR/Instruction.cpp
+++ b/lib/IR/Instruction.cpp
@@ -362,13 +362,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
(LI->getAlignment() == cast<LoadInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
LI->getOrdering() == cast<LoadInst>(I2)->getOrdering() &&
- LI->getSynchScope() == cast<LoadInst>(I2)->getSynchScope();
+ LI->getSyncScopeID() == cast<LoadInst>(I2)->getSyncScopeID();
if (const StoreInst *SI = dyn_cast<StoreInst>(I1))
return SI->isVolatile() == cast<StoreInst>(I2)->isVolatile() &&
(SI->getAlignment() == cast<StoreInst>(I2)->getAlignment() ||
IgnoreAlignment) &&
SI->getOrdering() == cast<StoreInst>(I2)->getOrdering() &&
- SI->getSynchScope() == cast<StoreInst>(I2)->getSynchScope();
+ SI->getSyncScopeID() == cast<StoreInst>(I2)->getSyncScopeID();
if (const CmpInst *CI = dyn_cast<CmpInst>(I1))
return CI->getPredicate() == cast<CmpInst>(I2)->getPredicate();
if (const CallInst *CI = dyn_cast<CallInst>(I1))
@@ -386,7 +386,7 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
return EVI->getIndices() == cast<ExtractValueInst>(I2)->getIndices();
if (const FenceInst *FI = dyn_cast<FenceInst>(I1))
return FI->getOrdering() == cast<FenceInst>(I2)->getOrdering() &&
- FI->getSynchScope() == cast<FenceInst>(I2)->getSynchScope();
+ FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
@@ -394,12 +394,13 @@ static bool haveSameSpecialState(const Instruction *I1, const Instruction *I2,
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
CXI->getFailureOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getFailureOrdering() &&
- CXI->getSynchScope() == cast<AtomicCmpXchgInst>(I2)->getSynchScope();
+ CXI->getSyncScopeID() ==
+ cast<AtomicCmpXchgInst>(I2)->getSyncScopeID();
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
- RMWI->getSynchScope() == cast<AtomicRMWInst>(I2)->getSynchScope();
+ RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
return true;
}
diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp
index af58040eb27..2c49564e328 100644
--- a/lib/IR/Instructions.cpp
+++ b/lib/IR/Instructions.cpp
@@ -1304,34 +1304,34 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, Instruction *InsertBef)
: LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBef) {}
+ SyncScope::System, InsertBef) {}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, BasicBlock *InsertAE)
: LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAE) {}
+ SyncScope::System, InsertAE) {}
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope, Instruction *InsertBef)
+ SyncScope::ID SSID, Instruction *InsertBef)
: UnaryInstruction(Ty, Load, Ptr, InsertBef) {
assert(Ty == cast<PointerType>(Ptr->getType())->getElementType());
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAE)
: UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
Load, Ptr, InsertAE) {
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
setName(Name);
}
@@ -1419,16 +1419,16 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
Instruction *InsertBefore)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertBefore) {}
+ SyncScope::System, InsertBefore) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
BasicBlock *InsertAtEnd)
: StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
- CrossThread, InsertAtEnd) {}
+ SyncScope::System, InsertAtEnd) {}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1438,13 +1438,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
unsigned Align, AtomicOrdering Order,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(val->getContext()), Store,
OperandTraits<StoreInst>::op_begin(this),
@@ -1454,7 +1454,7 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
Op<1>() = addr;
setVolatile(isVolatile);
setAlignment(Align);
- setAtomic(Order, SynchScope);
+ setAtomic(Order, SSID);
AssertOK();
}
@@ -1474,13 +1474,13 @@ void StoreInst::setAlignment(unsigned Align) {
void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Cmp;
Op<2>() = NewVal;
setSuccessOrdering(SuccessOrdering);
setFailureOrdering(FailureOrdering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) && getOperand(2) &&
"All operands must be non-null!");
@@ -1507,25 +1507,25 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
AtomicOrdering SuccessOrdering,
AtomicOrdering FailureOrdering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(
StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
- Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SynchScope);
+ Init(Ptr, Cmp, NewVal, SuccessOrdering, FailureOrdering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1534,12 +1534,12 @@ AtomicCmpXchgInst::AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal,
void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope) {
+ SyncScope::ID SSID) {
Op<0>() = Ptr;
Op<1>() = Val;
setOperation(Operation);
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
assert(getOperand(0) && getOperand(1) &&
"All operands must be non-null!");
@@ -1554,24 +1554,24 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertBefore) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Val->getType(), AtomicRMW,
OperandTraits<AtomicRMWInst>::op_begin(this),
OperandTraits<AtomicRMWInst>::operands(this),
InsertAtEnd) {
- Init(Operation, Ptr, Val, Ordering, SynchScope);
+ Init(Operation, Ptr, Val, Ordering, SSID);
}
//===----------------------------------------------------------------------===//
@@ -1579,19 +1579,19 @@ AtomicRMWInst::AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val,
//===----------------------------------------------------------------------===//
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
- SynchronizationScope SynchScope,
+ SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setOrdering(Ordering);
- setSynchScope(SynchScope);
+ setSyncScopeID(SSID);
}
//===----------------------------------------------------------------------===//
@@ -3795,12 +3795,12 @@ AllocaInst *AllocaInst::cloneImpl() const {
LoadInst *LoadInst::cloneImpl() const {
return new LoadInst(getOperand(0), Twine(), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
- getAlignment(), getOrdering(), getSynchScope());
+ getAlignment(), getOrdering(), getSyncScopeID());
}
@@ -3808,7 +3808,7 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicCmpXchgInst *Result =
new AtomicCmpXchgInst(getOperand(0), getOperand(1), getOperand(2),
getSuccessOrdering(), getFailureOrdering(),
- getSynchScope());
+ getSyncScopeID());
Result->setVolatile(isVolatile());
Result->setWeak(isWeak());
return Result;
@@ -3816,14 +3816,14 @@ AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
AtomicRMWInst *AtomicRMWInst::cloneImpl() const {
AtomicRMWInst *Result =
- new AtomicRMWInst(getOperation(),getOperand(0), getOperand(1),
- getOrdering(), getSynchScope());
+ new AtomicRMWInst(getOperation(), getOperand(0), getOperand(1),
+ getOrdering(), getSyncScopeID());
Result->setVolatile(isVolatile());
return Result;
}
FenceInst *FenceInst::cloneImpl() const {
- return new FenceInst(getContext(), getOrdering(), getSynchScope());
+ return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
}
TruncInst *TruncInst::cloneImpl() const {
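
The constructor updates above are mechanical, but call sites now read differently. A minimal sketch of the updated construction API, assuming an existing LLVMContext Ctx, a pointer-typed Value *Ptr, and an insertion point InsertBefore (all illustrative):

    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    void emitScopedAtomics(LLVMContext &Ctx, Value *Ptr,
                           Instruction *InsertBefore) {
      // SyncScope::ID replaces the old SynchronizationScope enum everywhere.
      LoadInst *LI = new LoadInst(Ptr, "val", /*isVolatile=*/false, /*Align=*/4,
                                  AtomicOrdering::Acquire, SyncScope::System,
                                  InsertBefore);
      (void)LI;
      // SyncScope::SingleThread replaces the old singlethread keyword/enum.
      new FenceInst(Ctx, AtomicOrdering::SequentiallyConsistent,
                    SyncScope::SingleThread, InsertBefore);
    }
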
diff --git a/lib/IR/LLVMContext.cpp b/lib/IR/LLVMContext.cpp
index 2e13f362344..1463cdb60fb 100644
--- a/lib/IR/LLVMContext.cpp
+++ b/lib/IR/LLVMContext.cpp
@@ -81,6 +81,16 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(GCTransitionEntry->second == LLVMContext::OB_gc_transition &&
"gc-transition operand bundle id drifted!");
(void)GCTransitionEntry;
+
+ SyncScope::ID SingleThreadSSID =
+ pImpl->getOrInsertSyncScopeID("singlethread");
+ assert(SingleThreadSSID == SyncScope::SingleThread &&
+ "singlethread synchronization scope ID drifted!");
+
+ SyncScope::ID SystemSSID =
+ pImpl->getOrInsertSyncScopeID("");
+ assert(SystemSSID == SyncScope::System &&
+ "system synchronization scope ID drifted!");
}
LLVMContext::~LLVMContext() { delete pImpl; }
@@ -255,6 +265,14 @@ uint32_t LLVMContext::getOperandBundleTagID(StringRef Tag) const {
return pImpl->getOperandBundleTagID(Tag);
}
+SyncScope::ID LLVMContext::getOrInsertSyncScopeID(StringRef SSN) {
+ return pImpl->getOrInsertSyncScopeID(SSN);
+}
+
+void LLVMContext::getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const {
+ pImpl->getSyncScopeNames(SSNs);
+}
+
void LLVMContext::setGC(const Function &Fn, std::string GCName) {
auto It = pImpl->GCNames.find(&Fn);
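
Registering a target-specific scope is now a single call against the context. A sketch, where the scope name "agent" is purely hypothetical and LI is an existing atomic LoadInst:

    using namespace llvm;
    // The name is interned in the context; repeated calls return the same ID.
    SyncScope::ID AgentSSID = Ctx.getOrInsertSyncScopeID("agent");
    assert(AgentSSID == Ctx.getOrInsertSyncScopeID("agent"));
    LI->setAtomic(AtomicOrdering::Acquire, AgentSSID);
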
diff --git a/lib/IR/LLVMContextImpl.cpp b/lib/IR/LLVMContextImpl.cpp
index c19e1be44fd..57dd08b36fe 100644
--- a/lib/IR/LLVMContextImpl.cpp
+++ b/lib/IR/LLVMContextImpl.cpp
@@ -205,6 +205,20 @@ uint32_t LLVMContextImpl::getOperandBundleTagID(StringRef Tag) const {
return I->second;
}
+SyncScope::ID LLVMContextImpl::getOrInsertSyncScopeID(StringRef SSN) {
+ auto NewSSID = SSC.size();
+ assert(NewSSID < std::numeric_limits<SyncScope::ID>::max() &&
+ "Hit the maximum number of synchronization scopes allowed!");
+ return SSC.insert(std::make_pair(SSN, SyncScope::ID(NewSSID))).first->second;
+}
+
+void LLVMContextImpl::getSyncScopeNames(
+ SmallVectorImpl<StringRef> &SSNs) const {
+ SSNs.resize(SSC.size());
+ for (const auto &SSE : SSC)
+ SSNs[SSE.second] = SSE.first();
+}
+
/// Singleton instance of the OptBisect class.
///
/// This singleton is accessed via the LLVMContext::getOptBisect() function. It
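
Because getSyncScopeNames resizes to SSC.size() and indexes by ID, names come back ordered by increasing ID, so a printer can map an ID straight back to its name. A sketch, assuming Ctx and a previously obtained SSID:

    SmallVector<StringRef, 8> SSNs;
    Ctx.getSyncScopeNames(SSNs);
    StringRef SSN = SSNs[SSID];
    // SSN is "" for SyncScope::System and "singlethread" for
    // SyncScope::SingleThread, matching the pre-registration above.
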
diff --git a/lib/IR/LLVMContextImpl.h b/lib/IR/LLVMContextImpl.h
index 395beb57fe3..e413a4f3443 100644
--- a/lib/IR/LLVMContextImpl.h
+++ b/lib/IR/LLVMContextImpl.h
@@ -1297,6 +1297,20 @@ public:
void getOperandBundleTags(SmallVectorImpl<StringRef> &Tags) const;
uint32_t getOperandBundleTagID(StringRef Tag) const;
+  /// The StringMap of interned synchronization scopes, mapping each
+  /// synchronization scope name to its synchronization scope ID.
+ StringMap<SyncScope::ID> SSC;
+
+  /// getOrInsertSyncScopeID - Maps a synchronization scope name to a
+  /// synchronization scope ID. Every synchronization scope registered with
+  /// LLVMContext receives a unique ID; the pre-defined scopes keep their
+  /// fixed, well-known IDs.
+ SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
+
+  /// getSyncScopeNames - Populates the client-supplied SmallVector with the
+  /// synchronization scope names registered with LLVMContext, ordered by
+  /// increasing synchronization scope ID.
+ void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
+
/// Maintain the GC name for each function.
///
/// This saves allocating an additional word in Function for programs which
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 7039bd34988..49384bf975a 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -3108,7 +3108,7 @@ void Verifier::visitLoadInst(LoadInst &LI) {
ElTy, &LI);
checkAtomicMemAccessSize(ElTy, &LI);
} else {
- Assert(LI.getSynchScope() == CrossThread,
+ Assert(LI.getSyncScopeID() == SyncScope::System,
"Non-atomic load cannot have SynchronizationScope specified", &LI);
}
@@ -3137,7 +3137,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
ElTy, &SI);
checkAtomicMemAccessSize(ElTy, &SI);
} else {
- Assert(SI.getSynchScope() == CrossThread,
+ Assert(SI.getSyncScopeID() == SyncScope::System,
"Non-atomic store cannot have SynchronizationScope specified", &SI);
}
visitInstruction(SI);
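
Both hunks enforce the same invariant: only atomic accesses may carry a non-default scope. In sketch form, with LI an illustrative LoadInst:

    LI->setAtomic(AtomicOrdering::Acquire, SyncScope::SingleThread);   // OK: atomic
    LI->setAtomic(AtomicOrdering::NotAtomic, SyncScope::System);       // OK: default scope
    LI->setAtomic(AtomicOrdering::NotAtomic, SyncScope::SingleThread); // verifier error
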
diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 4ff9f79898f..6ba7593543a 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -3398,9 +3398,9 @@ ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
const ARMSubtarget *Subtarget) {
SDLoc dl(Op);
- ConstantSDNode *ScopeN = cast<ConstantSDNode>(Op.getOperand(2));
- auto Scope = static_cast<SynchronizationScope>(ScopeN->getZExtValue());
- if (Scope == SynchronizationScope::SingleThread)
+ ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
+ auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
+ if (SSID == SyncScope::SingleThread)
return Op;
if (!Subtarget->hasDataBarrier()) {
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 937bb6d229d..2801141cd95 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3182,13 +3182,13 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
SDLoc DL(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
+ SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
- FenceScope == CrossThread) {
+ FenceSSID == SyncScope::System) {
return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
Op.getOperand(0)),
0);
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 4e160eccb3a..3d08037e728 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -22850,7 +22850,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
auto Builder = IRBuilder<>(AI);
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
- auto SynchScope = AI->getSynchScope();
+ auto SSID = AI->getSyncScopeID();
// We must restrict the ordering to avoid generating loads with Release or
// ReleaseAcquire orderings.
auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
@@ -22872,7 +22872,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// otherwise, we might be able to be more aggressive on relaxed idempotent
// rmw. In practice, they do not look useful, so we don't try to be
// especially clever.
- if (SynchScope == SingleThread)
+ if (SSID == SyncScope::SingleThread)
// FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
@@ -22891,7 +22891,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
// Finally we can emit the atomic load.
LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
AI->getType()->getPrimitiveSizeInBits());
- Loaded->setAtomic(Order, SynchScope);
+ Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
return Loaded;
@@ -22902,13 +22902,13 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
- SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
+ SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
- FenceScope == CrossThread) {
+ FenceSSID == SyncScope::System) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
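
All three targets decode ATOMIC_FENCE the same way: operand 1 carries the ordering and operand 2 the scope ID, both as constants. On SystemZ and X86 only a sequentially consistent system-scope fence needs a real barrier instruction, while ARM instead elides only single-thread fences. The shared decode, in sketch form for an SDValue Op:

    AtomicOrdering Ordering = static_cast<AtomicOrdering>(
        cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
    SyncScope::ID SSID = static_cast<SyncScope::ID>(
        cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
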
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index 2ce868995bd..93eab680ca6 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -837,7 +837,7 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
// The global is initialized when the store to it occurs.
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
- SI->getOrdering(), SI->getSynchScope(), SI);
+ SI->getOrdering(), SI->getSyncScopeID(), SI);
SI->eraseFromParent();
continue;
}
@@ -854,7 +854,7 @@ OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
// Replace the cmp X, 0 with a use of the bool value.
// Sink the load to where the compare was, if atomic rules allow us to.
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
- LI->getOrdering(), LI->getSynchScope(),
+ LI->getOrdering(), LI->getSyncScopeID(),
LI->isUnordered() ? (Instruction*)ICI : LI);
InitBoolUsed = true;
switch (ICI->getPredicate()) {
@@ -1605,7 +1605,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
assert(LI->getOperand(0) == GV && "Not a copy!");
// Insert a new load, to preserve the saved value.
StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
- LI->getOrdering(), LI->getSynchScope(), LI);
+ LI->getOrdering(), LI->getSyncScopeID(), LI);
} else {
assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
"This is not a form that we understand!");
@@ -1614,12 +1614,12 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
}
}
new StoreInst(StoreVal, NewGV, false, 0,
- SI->getOrdering(), SI->getSynchScope(), SI);
+ SI->getOrdering(), SI->getSyncScopeID(), SI);
} else {
// Change the load into a load of bool then a select.
LoadInst *LI = cast<LoadInst>(UI);
LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
- LI->getOrdering(), LI->getSynchScope(), LI);
+ LI->getOrdering(), LI->getSyncScopeID(), LI);
Value *NSI;
if (IsOneZero)
NSI = new ZExtInst(NLI, LI->getType(), "", LI);
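
The transform-side changes all follow one pattern: when a load or store is rewritten, its ordering and scope are forwarded verbatim so atomic semantics survive the rewrite. In sketch form, with NewPtr and OldLI illustrative:

    LoadInst *NLI = new LoadInst(NewPtr, OldLI->getName() + ".b", false, 0,
                                 OldLI->getOrdering(), OldLI->getSyncScopeID(),
                                 OldLI);
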
diff --git a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 05936a94629..c59e1ce69ac 100644
--- a/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -461,7 +461,7 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
- NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
MDBuilder MDB(NewLoad->getContext());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
@@ -521,7 +521,7 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
StoreInst *NewStore = IC.Builder.CreateAlignedStore(
V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment(), SI.isVolatile());
- NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
+ NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
for (const auto &MDPair : MD) {
unsigned ID = MDPair.first;
MDNode *N = MDPair.second;
@@ -1025,9 +1025,9 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
SI->getOperand(2)->getName()+".val");
assert(LI.isUnordered() && "implied by above");
V1->setAlignment(Align);
- V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
V2->setAlignment(Align);
- V2->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
return SelectInst::Create(SI->getCondition(), V1, V2);
}
@@ -1540,7 +1540,7 @@ bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
SI.isVolatile(),
SI.getAlignment(),
SI.getOrdering(),
- SI.getSynchScope());
+ SI.getSyncScopeID());
InsertNewInstBefore(NewSI, *BBI);
// The debug locations of the original instructions might differ; merge them.
NewSI->setDebugLoc(DILocation::getMergedLocation(SI.getDebugLoc(),
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index a991792bf5a..ec6904486e1 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -379,10 +379,11 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
}
static bool isAtomic(Instruction *I) {
+  // TODO: Ask TTI whether the synchronization scope is between threads.
if (LoadInst *LI = dyn_cast<LoadInst>(I))
- return LI->isAtomic() && LI->getSynchScope() == CrossThread;
+ return LI->isAtomic() && LI->getSyncScopeID() != SyncScope::SingleThread;
if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return SI->isAtomic() && SI->getSynchScope() == CrossThread;
+ return SI->isAtomic() && SI->getSyncScopeID() != SyncScope::SingleThread;
if (isa<AtomicRMWInst>(I))
return true;
if (isa<AtomicCmpXchgInst>(I))
@@ -676,7 +677,7 @@ bool ThreadSanitizer::instrumentAtomic(Instruction *I, const DataLayout &DL) {
I->eraseFromParent();
} else if (FenceInst *FI = dyn_cast<FenceInst>(I)) {
Value *Args[] = {createOrdering(&IRB, FI->getOrdering())};
- Function *F = FI->getSynchScope() == SingleThread ?
+ Function *F = FI->getSyncScopeID() == SyncScope::SingleThread ?
TsanAtomicSignalFence : TsanAtomicThreadFence;
CallInst *C = CallInst::Create(F, Args);
ReplaceInstWithInst(I, C);
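
Note the deliberate widening in isAtomic above: the old check instrumented only CrossThread accesses, whereas the new one instruments every scope other than SingleThread. That is the conservative choice for target-specific scopes, which may still synchronize between threads; the TODO leaves room to refine the test per target.
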
diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
index f6783f5c3db..0fe72f3f733 100644
--- a/lib/Transforms/Scalar/GVN.cpp
+++ b/lib/Transforms/Scalar/GVN.cpp
@@ -1166,7 +1166,7 @@ bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
LI->isVolatile(), LI->getAlignment(),
- LI->getOrdering(), LI->getSynchScope(),
+ LI->getOrdering(), LI->getSyncScopeID(),
UnavailablePred->getTerminator());
// Transfer the old load's AA tags to the new load.
diff --git a/lib/Transforms/Scalar/JumpThreading.cpp b/lib/Transforms/Scalar/JumpThreading.cpp
index 05293eb0079..ee3de51b136 100644
--- a/lib/Transforms/Scalar/JumpThreading.cpp
+++ b/lib/Transforms/Scalar/JumpThreading.cpp
@@ -1212,7 +1212,7 @@ bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
LoadInst *NewVal = new LoadInst(
LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
LI->getName() + ".pr", false, LI->getAlignment(), LI->getOrdering(),
- LI->getSynchScope(), UnavailablePred->getTerminator());
+ LI->getSyncScopeID(), UnavailablePred->getTerminator());
NewVal->setDebugLoc(LI->getDebugLoc());
if (AATags)
NewVal->setAAMetadata(AATags);
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index f8497e9c5fe..b9cee5b2ba9 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -2398,7 +2398,7 @@ private:
LoadInst *NewLI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
LI.isVolatile(), LI.getName());
if (LI.isVolatile())
- NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
// Any !nonnull metadata or !range metadata on the old load is also valid
// on the new load. This is even true in some cases even when the loads
@@ -2433,7 +2433,7 @@ private:
getSliceAlign(TargetTy),
LI.isVolatile(), LI.getName());
if (LI.isVolatile())
- NewLI->setAtomic(LI.getOrdering(), LI.getSynchScope());
+ NewLI->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
V = NewLI;
IsPtrAdjusted = true;
@@ -2576,7 +2576,7 @@ private:
}
NewSI->copyMetadata(SI, LLVMContext::MD_mem_parallel_loop_access);
if (SI.isVolatile())
- NewSI->setAtomic(SI.getOrdering(), SI.getSynchScope());
+ NewSI->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
Pass.DeadInsts.insert(&SI);
deleteIfTriviallyDead(OldOp);
diff --git a/lib/Transforms/Utils/FunctionComparator.cpp b/lib/Transforms/Utils/FunctionComparator.cpp
index 0457294361b..4a2be3a5317 100644
--- a/lib/Transforms/Utils/FunctionComparator.cpp
+++ b/lib/Transforms/Utils/FunctionComparator.cpp
@@ -513,8 +513,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
return Res;
- if (int Res =
- cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
+ if (int Res = cmpNumbers(LI->getSyncScopeID(),
+ cast<LoadInst>(R)->getSyncScopeID()))
return Res;
return cmpRangeMetadata(LI->getMetadata(LLVMContext::MD_range),
cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
@@ -529,7 +529,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
+ return cmpNumbers(SI->getSyncScopeID(),
+ cast<StoreInst>(R)->getSyncScopeID());
}
if (const CmpInst *CI = dyn_cast<CmpInst>(L))
return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
@@ -584,7 +585,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res =
cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
+ return cmpNumbers(FI->getSyncScopeID(),
+ cast<FenceInst>(R)->getSyncScopeID());
}
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
if (int Res = cmpNumbers(CXI->isVolatile(),
@@ -601,8 +603,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
cmpOrderings(CXI->getFailureOrdering(),
cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
return Res;
- return cmpNumbers(CXI->getSynchScope(),
- cast<AtomicCmpXchgInst>(R)->getSynchScope());
+ return cmpNumbers(CXI->getSyncScopeID(),
+ cast<AtomicCmpXchgInst>(R)->getSyncScopeID());
}
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
if (int Res = cmpNumbers(RMWI->getOperation(),
@@ -614,8 +616,8 @@ int FunctionComparator::cmpOperations(const Instruction *L,
if (int Res = cmpOrderings(RMWI->getOrdering(),
cast<AtomicRMWInst>(R)->getOrdering()))
return Res;
- return cmpNumbers(RMWI->getSynchScope(),
- cast<AtomicRMWInst>(R)->getSynchScope());
+ return cmpNumbers(RMWI->getSyncScopeID(),
+ cast<AtomicRMWInst>(R)->getSyncScopeID());
}
if (const PHINode *PNL = dyn_cast<PHINode>(L)) {
const PHINode *PNR = cast<PHINode>(R);
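
A closing note on the comparator changes: cmpNumbers remains sound for scope IDs because they are interned per LLVMContext, and FunctionComparator compares functions within a single module, hence a single context. A minimal illustration, with a hypothetical scope name:

    SyncScope::ID A = Ctx.getOrInsertSyncScopeID("agent");
    SyncScope::ID B = Ctx.getOrInsertSyncScopeID("agent");
    assert(A == B && "same context + same name => same ID");
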