
/thirdparty/breakpad/processor/stackwalker_amd64.cc

http://github.com/tomahawk-player/tomahawk
// Copyright (c) 2010 Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// stackwalker_amd64.cc: amd64-specific stackwalker.
//
// See stackwalker_amd64.h for documentation.
//
// Author: Mark Mentovai, Ted Mielczarek


#include "google_breakpad/processor/call_stack.h"
#include "google_breakpad/processor/memory_region.h"
#include "google_breakpad/processor/source_line_resolver_interface.h"
#include "google_breakpad/processor/stack_frame_cpu.h"
#include "processor/cfi_frame_info.h"
#include "processor/logging.h"
#include "processor/scoped_ptr.h"
#include "processor/stackwalker_amd64.h"

namespace google_breakpad {


const StackwalkerAMD64::CFIWalker::RegisterSet
StackwalkerAMD64::cfi_register_map_[] = {
  // It may seem like $rip and $rsp are callee-saves, because the callee is
  // responsible for having them restored upon return. But the callee_saves
  // flags here really mean that the walker should assume they're
  // unchanged if the CFI doesn't mention them --- clearly wrong for $rip
  // and $rsp.
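  //
  // Each entry below holds, roughly: the register's name as it appears in
  // CFI rules, an alternate CFI name where one exists (".cfa" for $rsp,
  // ".ra" for $rip), the callee_saves flag, the corresponding validity bit
  // in StackFrameAMD64::context_validity, and a pointer to the matching
  // MDRawContextAMD64 member. See CFIWalker::RegisterSet in
  // cfi_frame_info.h for the authoritative field definitions.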
  { "$rax", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax },
  { "$rdx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx },
  { "$rcx", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx },
  { "$rbx", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx },
  { "$rsi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi },
  { "$rdi", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi },
  { "$rbp", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp },
  { "$rsp", ".cfa", false,
    StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp },
  { "$r8", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R8,  &MDRawContextAMD64::r8 },
  { "$r9", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R9,  &MDRawContextAMD64::r9 },
  { "$r10", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 },
  { "$r11", NULL, false,
    StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 },
  { "$r12", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 },
  { "$r13", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 },
  { "$r14", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 },
  { "$r15", NULL, true,
    StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 },
  { "$rip", ".ra", false,
    StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip },
};

StackwalkerAMD64::StackwalkerAMD64(const SystemInfo *system_info,
                                   const MDRawContextAMD64 *context,
                                   MemoryRegion *memory,
                                   const CodeModules *modules,
                                   SymbolSupplier *supplier,
                                   SourceLineResolverInterface *resolver)
    : Stackwalker(system_info, memory, modules, supplier, resolver),
      context_(context),
      cfi_walker_(cfi_register_map_,
                  (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) {
}


StackFrame* StackwalkerAMD64::GetContextFrame() {
  if (!context_ || !memory_) {
    BPLOG(ERROR) << "Can't get context frame without context or memory";
    return NULL;
  }

  StackFrameAMD64 *frame = new StackFrameAMD64();

  // The instruction pointer is stored directly in a register, so pull it
  // straight out of the CPU context structure.
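  // Because this frame is taken verbatim from the minidump's thread
  // context, every register is marked valid and the frame carries the
  // strongest trust level.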
  frame->context = *context_;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL;
  frame->trust = StackFrame::FRAME_TRUST_CONTEXT;
  frame->instruction = frame->context.rip;

  return frame;
}

StackFrameAMD64 *StackwalkerAMD64::GetCallerByCFIFrameInfo(
    const vector<StackFrame *> &frames,
    CFIFrameInfo *cfi_frame_info) {
  StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64*>(frames.back());

  scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64());
  if (!cfi_walker_
      .FindCallerRegisters(*memory_, *cfi_frame_info,
                           last_frame->context, last_frame->context_validity,
                           &frame->context, &frame->context_validity))
    return NULL;

  // Make sure we recovered all the essentials.
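  // Without both a return address ($rip) and a stack pointer ($rsp) there
  // is nothing to anchor the caller's frame on, so treat the CFI recovery
  // as having failed.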
  static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP
                                 | StackFrameAMD64::CONTEXT_VALID_RSP);
  if ((frame->context_validity & essentials) != essentials)
    return NULL;

  frame->trust = StackFrame::FRAME_TRUST_CFI;
  return frame.release();
}

StackFrameAMD64 *StackwalkerAMD64::GetCallerByStackScan(
    const vector<StackFrame *> &frames) {
  StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64 *>(frames.back());
  u_int64_t last_rsp = last_frame->context.rsp;
  u_int64_t caller_rip_address, caller_rip;

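  // ScanForReturnAddress, a helper on the Stackwalker base class, searches
  // the stack upward from last_rsp, one word at a time, for a value that
  // plausibly points into executable code; on success it reports both the
  // candidate return address and the stack slot that holds it.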
  if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip)) {
    // No plausible return address was found.
    return NULL;
  }

  // Create a new stack frame (ownership will be transferred to the caller)
  // and fill it in.
  StackFrameAMD64 *frame = new StackFrameAMD64();

  frame->trust = StackFrame::FRAME_TRUST_SCAN;
  frame->context = last_frame->context;
  frame->context.rip = caller_rip;
  // The caller's %rsp is directly underneath the return address pushed by
  // the call.
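  // That is, with higher addresses listed first:
  //   caller_rip_address + 8 : the caller's %rsp once the CALL returns
  //   caller_rip_address     : the saved return address (caller_rip)
  //   lower addresses        : the callee's own stack, down to last_rsp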
  frame->context.rsp = caller_rip_address + 8;
  frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP |
                            StackFrameAMD64::CONTEXT_VALID_RSP;

  // Other unwinders give up if they don't have an %rbp value, so see if we
  // can pass some plausible value on.
  if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) {
    // Functions typically push their caller's %rbp immediately upon entry,
    // and then set %rbp to point to that. So if the callee's %rbp is
    // pointing to the first word below the alleged return address, presume
    // that the caller's %rbp is saved there.
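    // In other words, the conventional prologue is:
    //   push %rbp          ; saves the caller's %rbp just below the
    //                      ; return address
    //   mov  %rsp, %rbp    ; %rbp now points at that saved slot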
    if (caller_rip_address - 8 == last_frame->context.rbp) {
      u_int64_t caller_rbp = 0;
      if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) &&
          caller_rbp > caller_rip_address) {
        frame->context.rbp = caller_rbp;
        frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
      }
    } else if (last_frame->context.rbp >= caller_rip_address + 8) {
      // If the callee's %rbp is plausible as a value for the caller's
      // %rbp, presume that the callee left it unchanged.
      frame->context.rbp = last_frame->context.rbp;
      frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP;
    }
  }

  return frame;
}

StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack *stack) {
  if (!memory_ || !stack) {
    BPLOG(ERROR) << "Can't get caller frame without memory or stack";
    return NULL;
  }

  const vector<StackFrame *> &frames = *stack->frames();
  StackFrameAMD64 *last_frame = static_cast<StackFrameAMD64 *>(frames.back());
  scoped_ptr<StackFrameAMD64> new_frame;

  // If we have DWARF CFI information, use it.
  scoped_ptr<CFIFrameInfo> cfi_frame_info(
      resolver_ ? resolver_->FindCFIFrameInfo(last_frame) : NULL);
  if (cfi_frame_info.get())
    new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get()));

  // If CFI failed, or there wasn't CFI available, fall back
  // to stack scanning.
  if (!new_frame.get()) {
    new_frame.reset(GetCallerByStackScan(frames));
  }

  // If nothing worked, tell the caller.
  if (!new_frame.get())
    return NULL;

  // Treat an instruction address of 0 as end-of-stack.
  if (new_frame->context.rip == 0)
    return NULL;

  // If the new stack pointer is at a lower address than the old, then
  // that's clearly incorrect. Treat this as end-of-stack to enforce
  // progress and avoid infinite loops.
  if (new_frame->context.rsp <= last_frame->context.rsp)
    return NULL;

  // new_frame->context.rip is the return address, which is one instruction
  // past the CALL that caused us to arrive at the callee. Set
  // new_frame->instruction to one less than that. This won't reference the
  // beginning of the CALL instruction, but it's guaranteed to be within
  // the CALL, which is sufficient to get the source line information to
  // match up with the line that contains a function call. Callers that
  // require the exact return address value may access the context.rip
  // field of StackFrameAMD64.
  new_frame->instruction = new_frame->context.rip - 1;

  return new_frame.release();
}

}  // namespace google_breakpad