summaryrefslogtreecommitdiff
path: root/lib/xray/xray_function_call_trie.h
diff options
context:
space:
mode:
Diffstat (limited to 'lib/xray/xray_function_call_trie.h')
-rw-r--r-- lib/xray/xray_function_call_trie.h | 63
1 file changed, 20 insertions(+), 43 deletions(-)
diff --git a/lib/xray/xray_function_call_trie.h b/lib/xray/xray_function_call_trie.h
index e6e31cec2..2acf14aa5 100644
--- a/lib/xray/xray_function_call_trie.h
+++ b/lib/xray/xray_function_call_trie.h
@@ -119,9 +119,9 @@ public:
// We add a constructor here to allow us to inplace-construct through
// Array<...>'s AppendEmplace.
- Node(Node *P, NodeIdPairAllocatorType &A, ChunkAllocator &CA, int64_t CC,
- int64_t CLT, int32_t F)
- : Parent(P), Callees(A, CA), CallCount(CC), CumulativeLocalTime(CLT),
+ Node(Node *P, NodeIdPairAllocatorType &A, int64_t CC, int64_t CLT,
+ int32_t F)
+ : Parent(P), Callees(A), CallCount(CC), CumulativeLocalTime(CLT),
FId(F) {}
// TODO: Include the compact histogram.
@@ -153,7 +153,6 @@ public:
RootAllocatorType *RootAllocator = nullptr;
ShadowStackAllocatorType *ShadowStackAllocator = nullptr;
NodeIdPairAllocatorType *NodeIdPairAllocator = nullptr;
- ChunkAllocator *ChunkAlloc = nullptr;
Allocators() {}
Allocators(const Allocators &) = delete;
@@ -162,12 +161,11 @@ public:
Allocators(Allocators &&O)
: NodeAllocator(O.NodeAllocator), RootAllocator(O.RootAllocator),
ShadowStackAllocator(O.ShadowStackAllocator),
- NodeIdPairAllocator(O.NodeIdPairAllocator), ChunkAlloc(O.ChunkAlloc) {
+ NodeIdPairAllocator(O.NodeIdPairAllocator) {
O.NodeAllocator = nullptr;
O.RootAllocator = nullptr;
O.ShadowStackAllocator = nullptr;
O.NodeIdPairAllocator = nullptr;
- O.ChunkAlloc = nullptr;
}
Allocators &operator=(Allocators &&O) {
@@ -191,11 +189,6 @@ public:
O.NodeIdPairAllocator = this->NodeIdPairAllocator;
this->NodeIdPairAllocator = Tmp;
}
- {
- auto Tmp = O.ChunkAlloc;
- O.ChunkAlloc = this->ChunkAlloc;
- this->ChunkAlloc = Tmp;
- }
return *this;
}
@@ -223,11 +216,6 @@ public:
InternalFree(NodeIdPairAllocator);
NodeIdPairAllocator = nullptr;
}
- if (ChunkAlloc != nullptr) {
- ChunkAlloc->~ChunkAllocator();
- InternalFree(ChunkAlloc);
- ChunkAlloc = nullptr;
- }
}
};
@@ -258,11 +246,6 @@ public:
InternalAlloc(sizeof(NodeIdPairAllocatorType)));
new (NodeIdPairAllocator) NodeIdPairAllocatorType(Max);
A.NodeIdPairAllocator = NodeIdPairAllocator;
-
- auto ChunkAlloc = reinterpret_cast<ChunkAllocator *>(
- InternalAlloc(sizeof(ChunkAllocator)));
- new (ChunkAlloc) ChunkAllocator(Max);
- A.ChunkAlloc = ChunkAlloc;
return A;
}
@@ -271,22 +254,20 @@ private:
RootArray Roots;
ShadowStackArray ShadowStack;
NodeIdPairAllocatorType *NodeIdPairAllocator = nullptr;
- ChunkAllocator *ChunkAlloc = nullptr;
public:
explicit FunctionCallTrie(const Allocators &A)
- : Nodes(*A.NodeAllocator, *A.ChunkAlloc),
- Roots(*A.RootAllocator, *A.ChunkAlloc),
- ShadowStack(*A.ShadowStackAllocator, *A.ChunkAlloc),
- NodeIdPairAllocator(A.NodeIdPairAllocator), ChunkAlloc(A.ChunkAlloc) {}
+ : Nodes(*A.NodeAllocator), Roots(*A.RootAllocator),
+ ShadowStack(*A.ShadowStackAllocator),
+ NodeIdPairAllocator(A.NodeIdPairAllocator) {}
void enterFunction(const int32_t FId, uint64_t TSC) {
DCHECK_NE(FId, 0);
// This function primarily deals with ensuring that the ShadowStack is
// consistent and ready for when an exit event is encountered.
if (UNLIKELY(ShadowStack.empty())) {
- auto NewRoot = Nodes.AppendEmplace(nullptr, *NodeIdPairAllocator,
- *ChunkAlloc, 0, 0, FId);
+ auto NewRoot =
+ Nodes.AppendEmplace(nullptr, *NodeIdPairAllocator, 0, 0, FId);
if (UNLIKELY(NewRoot == nullptr))
return;
Roots.Append(NewRoot);
@@ -309,8 +290,8 @@ public:
}
// This means we've never seen this stack before, create a new node here.
- auto NewNode = Nodes.AppendEmplace(TopNode, *NodeIdPairAllocator,
- *ChunkAlloc, 0, 0, FId);
+ auto NewNode =
+ Nodes.AppendEmplace(TopNode, *NodeIdPairAllocator, 0, 0, FId);
if (UNLIKELY(NewNode == nullptr))
return;
DCHECK_NE(NewNode, nullptr);
@@ -370,13 +351,12 @@ public:
typename Stack::AllocatorType StackAllocator(
profilingFlags()->stack_allocator_max);
- ChunkAllocator StackChunkAllocator(profilingFlags()->stack_allocator_max);
- Stack DFSStack(StackAllocator, StackChunkAllocator);
+ Stack DFSStack(StackAllocator);
for (const auto Root : getRoots()) {
// Add a node in O for this root.
auto NewRoot = O.Nodes.AppendEmplace(
- nullptr, *O.NodeIdPairAllocator, *O.ChunkAlloc, Root->CallCount,
+ nullptr, *O.NodeIdPairAllocator, Root->CallCount,
Root->CumulativeLocalTime, Root->FId);
// Because we cannot allocate more memory we should bail out right away.
@@ -395,9 +375,8 @@ public:
DFSStack.trim(1);
for (const auto Callee : NP.Node->Callees) {
auto NewNode = O.Nodes.AppendEmplace(
- NP.NewNode, *O.NodeIdPairAllocator, *O.ChunkAlloc,
- Callee.NodePtr->CallCount, Callee.NodePtr->CumulativeLocalTime,
- Callee.FId);
+ NP.NewNode, *O.NodeIdPairAllocator, Callee.NodePtr->CallCount,
+ Callee.NodePtr->CumulativeLocalTime, Callee.FId);
if (UNLIKELY(NewNode == nullptr))
return;
NP.NewNode->Callees.AppendEmplace(NewNode, Callee.FId);
@@ -423,16 +402,15 @@ public:
using Stack = Array<NodeAndTarget>;
typename Stack::AllocatorType StackAllocator(
profilingFlags()->stack_allocator_max);
- ChunkAllocator CA(profilingFlags()->stack_allocator_max);
- Stack DFSStack(StackAllocator, CA);
+ Stack DFSStack(StackAllocator);
for (const auto Root : getRoots()) {
Node *TargetRoot = nullptr;
auto R = O.Roots.find_element(
[&](const Node *Node) { return Node->FId == Root->FId; });
if (R == nullptr) {
- TargetRoot = O.Nodes.AppendEmplace(nullptr, *O.NodeIdPairAllocator,
- *O.ChunkAlloc, 0, 0, Root->FId);
+ TargetRoot = O.Nodes.AppendEmplace(nullptr, *O.NodeIdPairAllocator, 0,
+ 0, Root->FId);
if (UNLIKELY(TargetRoot == nullptr))
return;
@@ -456,9 +434,8 @@ public:
return C.FId == Callee.FId;
});
if (TargetCallee == nullptr) {
- auto NewTargetNode =
- O.Nodes.AppendEmplace(NT.TargetNode, *O.NodeIdPairAllocator,
- *O.ChunkAlloc, 0, 0, Callee.FId);
+ auto NewTargetNode = O.Nodes.AppendEmplace(
+ NT.TargetNode, *O.NodeIdPairAllocator, 0, 0, Callee.FId);
if (UNLIKELY(NewTargetNode == nullptr))
return;