//===-- ThreadPlanStack.cpp -------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/ThreadPlanStack.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Utility/Log.h"

using namespace lldb;
using namespace lldb_private;

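// Print a single plan in a stack dump: one indented "Element <n>:" line
// followed by the plan's description at the requested level.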
static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                             lldb::DescriptionLevel desc_level,
                             int32_t elem_idx) {
  s.IndentMore();
  s.Indent();
  s.Printf("Element %d: ", elem_idx);
  plan->GetDescription(&s, desc_level);
  s.EOL();
  s.IndentLess();
}

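// A stack constructed with make_null = true starts out holding a single
// ThreadPlanNull, so it can be queried safely before any real plan is pushed.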
ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
  if (make_null) {
    // The ThreadPlanNull doesn't do anything to the Thread, so this is actually
    // still a const operation.
    m_plans.push_back(
        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
  }
}

void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                      lldb::DescriptionLevel desc_level,
                                      bool include_internal) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  s.IndentMore();
  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
                include_internal);
  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
                include_internal);
  s.IndentLess();
}

void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
                                    const PlanStack &stack,
                                    lldb::DescriptionLevel desc_level,
                                    bool include_internal) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  // If the stack is empty, just exit:
  if (stack.empty())
    return;

  // Make sure there are public completed plans:
  bool any_public = false;
  if (!include_internal) {
    for (auto plan : stack) {
      if (!plan->GetPrivate()) {
        any_public = true;
        break;
      }
    }
  }

  if (include_internal || any_public) {
    int print_idx = 0;
    s.Indent();
    s << stack_name << ":\n";
    for (auto plan : stack) {
      if (!include_internal && plan->GetPrivate())
        continue;
      PrintPlanElement(s, plan, desc_level, print_idx++);
    }
  }
}

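// Take a snapshot of the completed plan stack under a new checkpoint ID so it
// can later be restored or discarded by that ID.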
size_t ThreadPlanStack::CheckpointCompletedPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plan_checkpoint++;
  m_completed_plan_store.insert(
      std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
  return m_completed_plan_checkpoint;
}

void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  auto result = m_completed_plan_store.find(checkpoint);
  assert(result != m_completed_plan_store.end() &&
         "Asked for a checkpoint that didn't exist");
  m_completed_plans.swap((*result).second);
  m_completed_plan_store.erase(result);
}

void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plan_store.erase(checkpoint);
}

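// Called when the owning thread is being destroyed: notify every plan, clear
// all three stacks, and leave a ThreadPlanNull behind so stray queries against
// the destroyed thread don't crash.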
void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
  // Tell the plan stacks that this thread is going away:
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (ThreadPlanSP plan : m_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_discarded_plans)
    plan->ThreadDestroyed();

  for (ThreadPlanSP plan : m_completed_plans)
    plan->ThreadDestroyed();

  // Now clear the current plan stacks:
  m_plans.clear();
  m_discarded_plans.clear();
  m_completed_plans.clear();

  // Push a ThreadPlanNull on the plan stack. That way we can continue
  // assuming that the plan stack is never empty, but if somebody errantly asks
  // questions of a destroyed thread without checking first whether it is
  // destroyed, they won't crash.
  if (thread != nullptr) {
    lldb::ThreadPlanSP null_plan_sp(new ThreadPlanNull(*thread));
    m_plans.push_back(null_plan_sp);
  }
}

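// Push a plan onto the active stack. The very first plan pushed must be a base
// plan; subsequent plans inherit the tracer of the plan below them unless they
// already have one.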
void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
  // If the thread plan doesn't already have a tracer, give it its parent's
  // tracer:
  // The first plan has to be a base plan:
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
         "Zeroth plan must be a base plan");

  if (!new_plan_sp->GetThreadPlanTracer()) {
    assert(!m_plans.empty());
    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
  }
  m_plans.push_back(new_plan_sp);
  new_plan_sp->DidPush();
}

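// Pop the top plan off the active stack and move it onto the completed list.
// The base plan is never popped.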
lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't pop the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans are
  // all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_completed_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

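// Like PopPlan, but the removed plan is recorded as discarded rather than
// completed.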
lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't discard the base thread plan");

  // Note that moving the top element of the vector would leave it in an
  // undefined state, and break the guarantee that the stack's thread plans are
  // all valid.
  lldb::ThreadPlanSP plan_sp = m_plans.back();
  m_plans.pop_back();
  m_discarded_plans.push_back(plan_sp);
  plan_sp->DidPop();
  return plan_sp;
}

// If the input plan is nullptr, discard all plans. Otherwise make sure this
// plan is in the stack, and if so discard up to and including it.
void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();

  if (up_to_plan_ptr == nullptr) {
    for (int i = stack_size - 1; i > 0; i--)
      DiscardPlan();
    return;
  }

  bool found_it = false;
  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i].get() == up_to_plan_ptr) {
      found_it = true;
      break;
    }
  }

  if (found_it) {
    bool last_one = false;
    for (int i = stack_size - 1; i > 0 && !last_one; i--) {
      if (GetCurrentPlan().get() == up_to_plan_ptr)
        last_one = true;
      DiscardPlan();
    }
  }
}

void ThreadPlanStack::DiscardAllPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    DiscardPlan();
  }
  return;
}

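// Discard plans from the top of the stack, consulting master plans along the
// way: stop at the first master plan that refuses to be discarded. The base
// plan itself is never removed.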
void ThreadPlanStack::DiscardConsultingMasterPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  while (true) {
    int master_plan_idx;
    bool discard = true;

    // Find the first master plan, see if it wants discarding, and if yes
    // discard up to it.
    for (master_plan_idx = m_plans.size() - 1; master_plan_idx >= 0;
         master_plan_idx--) {
      if (m_plans[master_plan_idx]->IsMasterPlan()) {
        discard = m_plans[master_plan_idx]->OkayToDiscard();
        break;
      }
    }

    // If the master plan doesn't want to get discarded, then we're done.
    if (!discard)
      return;

    // First pop all the dependent plans:
    for (int i = m_plans.size() - 1; i > master_plan_idx; i--) {
      DiscardPlan();
    }

    // Now discard the master plan itself.
    // The bottom-most plan never gets discarded. "OkayToDiscard" for it
    // means discard its dependent plans, but not it...
    if (master_plan_idx > 0) {
      DiscardPlan();
    }
  }
}

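// The current plan is whatever sits on top of the active stack.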
lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() != 0 && "There will always be a base plan.");
  return m_plans.back();
}

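// Return the most recently completed plan, optionally skipping private plans.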
lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  if (!skip_private)
    return m_completed_plans.back();

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ThreadPlanSP completed_plan_sp;
    completed_plan_sp = m_completed_plans[i];
    if (!completed_plan_sp->GetPrivate())
      return completed_plan_sp;
  }
  return {};
}

lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                   bool skip_private) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  uint32_t idx = 0;

  for (lldb::ThreadPlanSP plan_sp : m_plans) {
    if (skip_private && plan_sp->GetPrivate())
      continue;
    if (idx == plan_idx)
      return plan_sp;
    idx++;
  }
  return {};
}

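// Walk the completed plans from newest to oldest and return the first return
// value object one of them produced, if any.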
lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ValueObjectSP return_valobj_sp;
    return_valobj_sp = m_completed_plans[i]->GetReturnValueObject();
    if (return_valobj_sp)
      return return_valobj_sp;
  }
  return {};
}

lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (m_completed_plans.empty())
    return {};

  for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
    lldb::ExpressionVariableSP expression_variable_sp;
    expression_variable_sp = m_completed_plans[i]->GetExpressionVariable();
    if (expression_variable_sp)
      return expression_variable_sp;
  }
  return {};
}

bool ThreadPlanStack::AnyPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  // There is always a base plan...
  return m_plans.size() > 1;
}

bool ThreadPlanStack::AnyCompletedPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  return !m_completed_plans.empty();
}

bool ThreadPlanStack::AnyDiscardedPlans() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  return !m_discarded_plans.empty();
}

bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (auto plan : m_completed_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (auto plan : m_discarded_plans) {
    if (plan.get() == in_plan)
      return true;
  }
  return false;
}

ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (current_plan == nullptr)
    return nullptr;

  // Look first in the completed plans, if the plan is here and there is
  // a completed plan above it, return that.
  int stack_size = m_completed_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_completed_plans[i].get())
      return m_completed_plans[i - 1].get();
  }

  // If this is the first completed plan, the previous one is the
  // bottom of the regular plan stack.
  if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
    return GetCurrentPlan().get();
  }

  // Otherwise look for it in the regular plans.
  stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_plans[i].get())
      return m_plans[i - 1].get();
  }
  return nullptr;
}

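// Return the innermost function-call plan on the active stack (the kind of
// plan used to run an expression), or nullptr if there isn't one.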
ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();

  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i]->GetKind() == ThreadPlan::eKindCallFunction)
      return m_plans[i].get();
  }
  return nullptr;
}

void ThreadPlanStack::ClearThreadCache() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
    thread_plan_sp->ClearThreadCache();
}

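// About to resume: the completed and discarded plans from the last stop are no
// longer needed, so clear them out.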
void ThreadPlanStack::WillResume() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plans.clear();
  m_discarded_plans.clear();
}

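// Bring the map in line with the current thread list: optionally add stacks
// (seeded with a base plan) for newly seen threads, and optionally remove
// stacks whose TIDs no longer appear in the thread list.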
void ThreadPlanStackMap::Update(ThreadList &current_threads,
                                bool delete_missing,
                                bool check_for_new) {

  // Now find all the new threads and add them to the map:
  if (check_for_new) {
    for (auto thread : current_threads.Threads()) {
      lldb::tid_t cur_tid = thread->GetID();
      if (!Find(cur_tid)) {
        AddThread(*thread.get());
        thread->QueueBasePlan(true);
      }
    }
  }

  // If we aren't reaping missing threads at this point,
  // we are done.
  if (!delete_missing)
    return;
  // Otherwise scan for absent TID's.
  std::vector<lldb::tid_t> missing_threads;
  // If we are going to delete plans from the plan stack,
  // then scan for absent TID's:
  for (auto &thread_plans : m_plans_list) {
    lldb::tid_t cur_tid = thread_plans.first;
    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
    if (!thread_sp)
      missing_threads.push_back(cur_tid);
  }
  for (lldb::tid_t tid : missing_threads) {
    RemoveTID(tid);
  }
}

void ThreadPlanStackMap::DumpPlans(Stream &strm,
                                   lldb::DescriptionLevel desc_level,
                                   bool internal, bool condense_if_trivial,
                                   bool skip_unreported) {
  for (auto &elem : m_plans_list) {
    lldb::tid_t tid = elem.first;
    uint32_t index_id = 0;
    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

    if (skip_unreported) {
      if (!thread_sp)
        continue;
    }
    if (thread_sp)
      index_id = thread_sp->GetIndexID();

    if (condense_if_trivial) {
      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
          !elem.second.AnyDiscardedPlans()) {
        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
        strm.IndentMore();
        strm.Indent();
        strm.Printf("No active thread plans\n");
        strm.IndentLess();
        return;
      }
    }

    strm.Indent();
    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

    elem.second.DumpThreadPlans(strm, desc_level, internal);
  }
}

bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
                                         lldb::DescriptionLevel desc_level,
                                         bool internal,
                                         bool condense_if_trivial,
                                         bool skip_unreported) {
  uint32_t index_id = 0;
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

  if (skip_unreported) {
    if (!thread_sp) {
      strm.Format("Unknown TID: {0}", tid);
      return false;
    }
  }

  if (thread_sp)
    index_id = thread_sp->GetIndexID();
  ThreadPlanStack *stack = Find(tid);
  if (!stack) {
    strm.Format("Unknown TID: {0}\n", tid);
    return false;
  }

  if (condense_if_trivial) {
    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
        !stack->AnyDiscardedPlans()) {
      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
      strm.IndentMore();
      strm.Indent();
      strm.Printf("No active thread plans\n");
      strm.IndentLess();
      return true;
    }
  }

  strm.Indent();
  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

  stack->DumpThreadPlans(strm, desc_level, internal);
  return true;
}

bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
  // We only remove the plans for unreported TID's.
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
  if (thread_sp)
    return false;

  return RemoveTID(tid);
}