// Copyright 2000 - 2007 Google Inc.
// All rights reserved.
//
// Author: Sanjay Ghemawat
//
// Portable implementation - just use glibc
//
// Note: The glibc implementation may cause a call to malloc.
// This can cause a deadlock in HeapProfiler.
#ifndef ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_
#define ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_

#include <execinfo.h>

#include <atomic>
#include <cstring>

#include "absl/debugging/stacktrace.h"
// Sometimes, we can try to get a stack trace from within a stack
// trace, because we don't block signals inside this code (which would be too
// expensive: the two extra system calls per stack trace do matter here).
// That can cause a self-deadlock.
// Protect against such reentrant call by failing to get a stack trace.
//
// We use __thread here because the code here is extremely low level -- it is
// called while collecting stack traces from within malloc and mmap, and thus
// can not call anything which might call malloc or mmap itself.
static __thread int recursive = 0;
// The stack trace function might be invoked very early in the program's
// execution (e.g. from the very first malloc if using tcmalloc). Also, the
// glibc implementation itself will trigger malloc the first time it is called.
// As such, we suppress usage of backtrace during this early stage of execution.
static std::atomic<bool> disable_stacktraces(true);  // Disabled until healthy.
36 // Waiting until static initializers run seems to be late enough.
37 // This file is included into stacktrace.cc so this will only run once.
38 static int stacktraces_enabler = []() {
39 void* unused_stack[1];
40 // Force the first backtrace to happen early to get the one-time shared lib
41 // loading (allocation) out of the way. After the first call it is much safer
42 // to use backtrace from a signal handler if we crash somewhere later.
43 backtrace(unused_stack, 1);
44 disable_stacktraces.store(false, std::memory_order_relaxed);
48 template <bool IS_STACK_FRAMES, bool IS_WITH_CONTEXT>
49 static int UnwindImpl(void** result, int* sizes, int max_depth, int skip_count,
50 const void *ucp, int *min_dropped_frames) {
51 if (recursive || disable_stacktraces.load(std::memory_order_relaxed)) {
56 static_cast<void>(ucp); // Unused.
57 static const int kStackLength = 64;
58 void * stack[kStackLength];
61 size = backtrace(stack, kStackLength);
62 skip_count++; // we want to skip the current frame as well
63 int result_count = size - skip_count;
66 if (result_count > max_depth)
67 result_count = max_depth;
68 for (int i = 0; i < result_count; i++)
69 result[i] = stack[i + skip_count];
71 if (IS_STACK_FRAMES) {
72 // No implementation for finding out the stack frame sizes yet.
73 memset(sizes, 0, sizeof(*sizes) * result_count);
75 if (min_dropped_frames != nullptr) {
76 if (size - skip_count - max_depth > 0) {
77 *min_dropped_frames = size - skip_count - max_depth;
79 *min_dropped_frames = 0;
namespace debugging_internal {
// Reports whether this stack-trace backend produces real traces. The glibc
// backtrace() backend does, so tests that exercise stack traces should pass.
bool StackTraceWorksForTest() {
  return true;
}
}  // namespace debugging_internal
96 #endif // ABSL_DEBUGGING_INTERNAL_STACKTRACE_GENERIC_INL_H_