LLDB mainline
ThreadList.cpp
Go to the documentation of this file.
1//===-- ThreadList.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
#include <cstdlib>

#include <algorithm>

#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlan.h"
#include "lldb/Target/ThreadPlanStepOverBreakpoint.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
25
26using namespace lldb;
27using namespace lldb_private;
28
32
36 // Use the assignment operator since it uses the mutex
37 *this = rhs;
38}
39
41 if (this != &rhs) {
42 // We only allow assignments between thread lists describing the same
43 // process. Same process implies same mutex, which means it's enough to lock
44 // just the current object.
45 assert(&m_process == &rhs.m_process);
46 assert(&GetMutex() == &rhs.GetMutex());
47 std::lock_guard<std::recursive_mutex> guard(GetMutex());
48
49 m_stop_id = rhs.m_stop_id;
50 m_threads = rhs.m_threads;
52 }
53 return *this;
54}
55
57 // Clear the thread list. Clear will take the mutex lock which will ensure
58 // that if anyone is using the list they won't get it removed while using it.
59 Clear();
60}
61
63 if (m_expression_tid_stack.empty())
64 return GetSelectedThread();
65 ThreadSP expr_thread_sp = FindThreadByID(m_expression_tid_stack.back());
66 if (expr_thread_sp)
67 return expr_thread_sp;
68 else
69 return GetSelectedThread();
70}
71
75
80
81uint32_t ThreadList::GetStopID() const { return m_stop_id; }
82
83void ThreadList::SetStopID(uint32_t stop_id) { m_stop_id = stop_id; }
84
85uint32_t ThreadList::GetSize(bool can_update) {
86 std::lock_guard<std::recursive_mutex> guard(GetMutex());
87
88 if (can_update)
89 m_process.UpdateThreadListIfNeeded();
90 return m_threads.size();
91}
92
93ThreadSP ThreadList::GetThreadAtIndex(uint32_t idx, bool can_update) {
94 std::lock_guard<std::recursive_mutex> guard(GetMutex());
95
96 if (can_update)
97 m_process.UpdateThreadListIfNeeded();
98
99 ThreadSP thread_sp;
100 if (idx < m_threads.size())
101 thread_sp = m_threads[idx];
102 return thread_sp;
103}
104
106 std::lock_guard<std::recursive_mutex> guard(GetMutex());
107
108 if (can_update)
109 m_process.UpdateThreadListIfNeeded();
110
111 ThreadSP thread_sp;
112 uint32_t idx = 0;
113 const uint32_t num_threads = m_threads.size();
114 for (idx = 0; idx < num_threads; ++idx) {
115 if (m_threads[idx]->GetID() == tid) {
116 thread_sp = m_threads[idx];
117 break;
118 }
119 }
120 return thread_sp;
121}
122
124 std::lock_guard<std::recursive_mutex> guard(GetMutex());
125
126 if (can_update)
127 m_process.UpdateThreadListIfNeeded();
128
129 ThreadSP thread_sp;
130 uint32_t idx = 0;
131 const uint32_t num_threads = m_threads.size();
132 for (idx = 0; idx < num_threads; ++idx) {
133 if (m_threads[idx]->GetProtocolID() == tid) {
134 thread_sp = m_threads[idx];
135 break;
136 }
137 }
138 return thread_sp;
139}
140
142 std::lock_guard<std::recursive_mutex> guard(GetMutex());
143
144 if (can_update)
145 m_process.UpdateThreadListIfNeeded();
146
147 ThreadSP thread_sp;
148 uint32_t idx = 0;
149 const uint32_t num_threads = m_threads.size();
150 for (idx = 0; idx < num_threads; ++idx) {
151 if (m_threads[idx]->GetID() == tid) {
152 thread_sp = m_threads[idx];
153 m_threads.erase(m_threads.begin() + idx);
154 break;
155 }
156 }
157 return thread_sp;
158}
159
161 bool can_update) {
162 std::lock_guard<std::recursive_mutex> guard(GetMutex());
163
164 if (can_update)
165 m_process.UpdateThreadListIfNeeded();
166
167 ThreadSP thread_sp;
168 uint32_t idx = 0;
169 const uint32_t num_threads = m_threads.size();
170 for (idx = 0; idx < num_threads; ++idx) {
171 if (m_threads[idx]->GetProtocolID() == tid) {
172 thread_sp = m_threads[idx];
173 m_threads.erase(m_threads.begin() + idx);
174 break;
175 }
176 }
177 return thread_sp;
178}
179
181 ThreadSP thread_sp;
182 if (thread_ptr) {
183 std::lock_guard<std::recursive_mutex> guard(GetMutex());
184
185 uint32_t idx = 0;
186 const uint32_t num_threads = m_threads.size();
187 for (idx = 0; idx < num_threads; ++idx) {
188 if (m_threads[idx].get() == thread_ptr) {
189 thread_sp = m_threads[idx];
190 break;
191 }
192 }
193 }
194 return thread_sp;
195}
196
197ThreadSP ThreadList::FindThreadByIndexID(uint32_t index_id, bool can_update) {
198 std::lock_guard<std::recursive_mutex> guard(GetMutex());
199
200 if (can_update)
201 m_process.UpdateThreadListIfNeeded();
202
203 ThreadSP thread_sp;
204 const uint32_t num_threads = m_threads.size();
205 for (uint32_t idx = 0; idx < num_threads; ++idx) {
206 if (m_threads[idx]->GetIndexID() == index_id) {
207 thread_sp = m_threads[idx];
208 break;
209 }
210 }
211 return thread_sp;
212}
213
215 // Running events should never stop, obviously...
216
217 Log *log = GetLog(LLDBLog::Step);
218
219 // The ShouldStop method of the threads can do a whole lot of work, figuring
220 // out whether the thread plan conditions are met. So we don't want to keep
221 // the ThreadList locked the whole time we are doing this.
222 // FIXME: It is possible that running code could cause new threads
223 // to be created. If that happens, we will miss asking them whether they
224 // should stop. This is not a big deal since we haven't had a chance to hang
225 // any interesting operations on those threads yet.
226
227 collection threads_copy;
228 {
229 // Scope for locker
230 std::lock_guard<std::recursive_mutex> guard(GetMutex());
231
232 m_process.UpdateThreadListIfNeeded();
233 for (lldb::ThreadSP thread_sp : m_threads) {
234 // This is an optimization... If we didn't let a thread run in between
235 // the previous stop and this one, we shouldn't have to consult it for
236 // ShouldStop. So just leave it off the list we are going to inspect.
237 // If the thread didn't run but had work to do before declaring a public
238 // stop, then also include it.
239 // On Linux, if a thread-specific conditional breakpoint was hit, it won't
240 // necessarily be the thread that hit the breakpoint itself that
241 // evaluates the conditional expression, so the thread that hit the
242 // breakpoint could still be asked to stop, even though it hasn't been
243 // allowed to run since the previous stop.
244 if (thread_sp->GetTemporaryResumeState() != eStateSuspended ||
245 thread_sp->IsStillAtLastBreakpointHit()
246 || thread_sp->ShouldRunBeforePublicStop())
247 threads_copy.push_back(thread_sp);
248 }
249
250 // It is possible the threads we were allowing to run all exited and then
251 // maybe the user interrupted or something, then fall back on looking at
252 // all threads:
253
254 if (threads_copy.size() == 0)
255 threads_copy = m_threads;
256 }
257
258 collection::iterator pos, end = threads_copy.end();
259
260 if (log) {
261 log->PutCString("");
262 LLDB_LOGF(log,
263 "ThreadList::%s: %" PRIu64 " threads, %" PRIu64
264 " unsuspended threads",
265 __FUNCTION__, (uint64_t)m_threads.size(),
266 (uint64_t)threads_copy.size());
267 }
268
269 bool did_anybody_stop_for_a_reason = false;
270
271 // If the event is an Interrupt event, then we're going to stop no matter
272 // what. Otherwise, presume we won't stop.
273 bool should_stop = false;
275 LLDB_LOGF(
276 log, "ThreadList::%s handling interrupt event, should stop set to true",
277 __FUNCTION__);
278
279 should_stop = true;
280 }
281
282 // Now we run through all the threads and get their stop info's. We want to
283 // make sure to do this first before we start running the ShouldStop, because
284 // one thread's ShouldStop could destroy information (like deleting a thread
285 // specific breakpoint another thread had stopped at) which could lead us to
286 // compute the StopInfo incorrectly. We don't need to use it here, we just
287 // want to make sure it gets computed.
288
289 for (pos = threads_copy.begin(); pos != end; ++pos) {
290 ThreadSP thread_sp(*pos);
291 thread_sp->GetStopInfo();
292 }
293
294 // If a thread needs to finish some job that can be done just on this thread
295 // before broadcastion the stop, it will signal that by returning true for
296 // ShouldRunBeforePublicStop. This variable gathers the results from that.
297 bool a_thread_needs_to_run = false;
298 for (pos = threads_copy.begin(); pos != end; ++pos) {
299 ThreadSP thread_sp(*pos);
300
301 // We should never get a stop for which no thread had a stop reason, but
302 // sometimes we do see this - for instance when we first connect to a
303 // remote stub. In that case we should stop, since we can't figure out the
304 // right thing to do and stopping gives the user control over what to do in
305 // this instance.
306 //
307 // Note, this causes a problem when you have a thread specific breakpoint,
308 // and a bunch of threads hit the breakpoint, but not the thread which we
309 // are waiting for. All the threads that are not "supposed" to hit the
310 // breakpoint are marked as having no stop reason, which is right, they
311 // should not show a stop reason. But that triggers this code and causes
312 // us to stop seemingly for no reason.
313 //
314 // Since the only way we ever saw this error was on first attach, I'm only
315 // going to trigger set did_anybody_stop_for_a_reason to true unless this
316 // is the first stop.
317 //
318 // If this becomes a problem, we'll have to have another StopReason like
319 // "StopInfoHidden" which will look invalid everywhere but at this check.
320
321 if (thread_sp->GetProcess()->GetStopID() > 1)
322 did_anybody_stop_for_a_reason = true;
323 else
324 did_anybody_stop_for_a_reason |= thread_sp->ThreadStoppedForAReason();
325
326 const bool thread_should_stop = thread_sp->ShouldStop(event_ptr);
327
328 if (thread_should_stop)
329 should_stop |= true;
330 else {
331 bool this_thread_forces_run = thread_sp->ShouldRunBeforePublicStop();
332 a_thread_needs_to_run |= this_thread_forces_run;
333 if (this_thread_forces_run)
334 LLDB_LOG(log,
335 "ThreadList::{0} thread: {1:x}, "
336 "says it needs to run before public stop.",
337 __FUNCTION__, thread_sp->GetID());
338 }
339 }
340
341 if (a_thread_needs_to_run) {
342 should_stop = false;
343 } else if (!should_stop && !did_anybody_stop_for_a_reason) {
344 should_stop = true;
345 LLDB_LOGF(log,
346 "ThreadList::%s we stopped but no threads had a stop reason, "
347 "overriding should_stop and stopping.",
348 __FUNCTION__);
349 }
350
351 LLDB_LOGF(log, "ThreadList::%s overall should_stop = %i", __FUNCTION__,
352 should_stop);
353
354 if (should_stop) {
355 for (pos = threads_copy.begin(); pos != end; ++pos) {
356 ThreadSP thread_sp(*pos);
357 thread_sp->WillStop();
358 }
359 }
360
361 return should_stop;
362}
363
365 std::lock_guard<std::recursive_mutex> guard(GetMutex());
366
367 Vote result = eVoteNoOpinion;
368 m_process.UpdateThreadListIfNeeded();
369 collection::iterator pos, end = m_threads.end();
370
371 Log *log = GetLog(LLDBLog::Step);
372
373 LLDB_LOGF(log, "ThreadList::%s %" PRIu64 " threads", __FUNCTION__,
374 (uint64_t)m_threads.size());
375
376 // Run through the threads and ask whether we should report this event. For
377 // stopping, a YES vote wins over everything. A NO vote wins over NO
378 // opinion. The exception is if a thread has work it needs to force before
379 // a public stop, which overrides everyone else's opinion:
380 for (pos = m_threads.begin(); pos != end; ++pos) {
381 ThreadSP thread_sp(*pos);
382 if (thread_sp->ShouldRunBeforePublicStop()) {
383 LLDB_LOG(log, "Thread {0:x} has private business to complete, overrode "
384 "the should report stop.", thread_sp->GetID());
385 result = eVoteNo;
386 break;
387 }
388
389 const Vote vote = thread_sp->ShouldReportStop(event_ptr);
390 switch (vote) {
391 case eVoteNoOpinion:
392 continue;
393
394 case eVoteYes:
395 result = eVoteYes;
396 break;
397
398 case eVoteNo:
399 if (result == eVoteNoOpinion) {
400 result = eVoteNo;
401 } else {
402 LLDB_LOG(log,
403 "Thread {0:x} voted {1}, but lost out because result was {2}",
404 thread_sp->GetID(), vote, result);
405 }
406 break;
407 }
408 }
409 LLDB_LOG(log, "Returning {0}", result);
410 return result;
411}
412
414 std::lock_guard<std::recursive_mutex> guard(GetMutex());
415
416 m_process.UpdateThreadListIfNeeded();
417 collection::iterator pos, end = m_threads.end();
418 for (pos = m_threads.begin(); pos != end; ++pos) {
419 ThreadSP thread_sp(*pos);
420 thread_sp->SetShouldReportStop(vote);
421 }
422}
423
425
426 std::lock_guard<std::recursive_mutex> guard(GetMutex());
427
428 Vote result = eVoteNoOpinion;
429 m_process.UpdateThreadListIfNeeded();
430 collection::iterator pos, end = m_threads.end();
431
432 // Run through the threads and ask whether we should report this event. The
433 // rule is NO vote wins over everything, a YES vote wins over no opinion.
434
435 Log *log = GetLog(LLDBLog::Step);
436
437 for (pos = m_threads.begin(); pos != end; ++pos) {
438 if ((*pos)->GetResumeState() != eStateSuspended) {
439 switch ((*pos)->ShouldReportRun(event_ptr)) {
440 case eVoteNoOpinion:
441 continue;
442 case eVoteYes:
443 if (result == eVoteNoOpinion)
444 result = eVoteYes;
445 break;
446 case eVoteNo:
447 LLDB_LOGF(log,
448 "ThreadList::ShouldReportRun() thread %d (0x%4.4" PRIx64
449 ") says don't report.",
450 (*pos)->GetIndexID(), (*pos)->GetID());
451 result = eVoteNo;
452 break;
453 }
454 }
455 }
456 return result;
457}
458
460 std::lock_guard<std::recursive_mutex> guard(GetMutex());
461 m_stop_id = 0;
462 m_threads.clear();
464}
465
467 std::lock_guard<std::recursive_mutex> guard(GetMutex());
468 const uint32_t num_threads = m_threads.size();
469 for (uint32_t idx = 0; idx < num_threads; ++idx) {
470 m_threads[idx]->DestroyThread();
471 }
472}
473
475 std::lock_guard<std::recursive_mutex> guard(GetMutex());
476
477 m_process.UpdateThreadListIfNeeded();
478
479 Log *log = GetLog(LLDBLog::Step);
480 if (log && log->GetVerbose())
481 LLDB_LOGF(log,
482 "Turning off notification of new threads while single stepping "
483 "a thread.");
484
485 collection::iterator pos, end = m_threads.end();
486 for (pos = m_threads.begin(); pos != end; ++pos)
487 (*pos)->RefreshStateAfterStop();
488}
489
491 // You don't need to update the thread list here, because only threads that
492 // you currently know about have any thread plans.
493 std::lock_guard<std::recursive_mutex> guard(GetMutex());
494
495 collection::iterator pos, end = m_threads.end();
496 for (pos = m_threads.begin(); pos != end; ++pos)
497 (*pos)->DiscardThreadPlans(true);
498}
499
501 // Run through the threads and perform their momentary actions. But we only
502 // do this for threads that are running, user suspended threads stay where
503 // they are.
504
505 std::lock_guard<std::recursive_mutex> guard(GetMutex());
506 m_process.UpdateThreadListIfNeeded();
507
508 collection::iterator pos, end = m_threads.end();
509
510 // Clear tracking state from the previous stop and pop any leftover
511 // StepOverBreakpoint plans. This gives us a clean slate: plans will be
512 // recreated fresh by SetupToStepOverBreakpointIfNeeded below, and the
513 // batching logic will recompute deferred state from scratch.
515 for (const auto &thread_sp : m_threads) {
516 ThreadPlan *plan = thread_sp->GetCurrentPlan();
517 if (plan && plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) {
518 auto *bp_plan = static_cast<ThreadPlanStepOverBreakpoint *>(plan);
519 // Only pop plans created by our batching logic (deferred plans).
520 // Plans from the single-thread path must not be popped, as doing so
521 // would change the StopOthers scan result and cause other threads
522 // to lose their breakpoint stop reason.
523 if (bp_plan->GetDeferReenableBreakpointSite()) {
524 // Suppress the re-enable side effect in DidPop(), the breakpoint
525 // may still be disabled from the previous batch, and we don't want
526 // to toggle it. The new plans will handle re-enable correctly.
527 bp_plan->SetReenabledBreakpointSite();
528 thread_sp->DiscardPlan();
529 }
530 }
531 }
532
533 // Go through the threads and see if any thread wants to run just itself.
534 // if so then pick one and run it.
535
536 // Collect threads for batched vCont for multiple threads at the same
537 // breakpoint.
538 llvm::SmallVector<ThreadSP> batched_step_threads;
539
540 ThreadList run_me_only_list(m_process);
541
542 run_me_only_list.SetStopID(m_process.GetStopID());
543
544 // One or more threads might want to "Stop Others". We want to handle all
545 // those requests first. And if there is a thread that wanted to "resume
546 // before a public stop", let it get the first crack:
547 // There are two special kinds of thread that have priority for "StopOthers":
548 // a "ShouldRunBeforePublicStop thread, or the currently selected thread. If
549 // we find one satisfying that critereon, put it here.
550 ThreadSP thread_to_run;
551 for (pos = m_threads.begin(); pos != end; ++pos) {
552 ThreadSP thread_sp(*pos);
553 if (thread_sp->GetResumeState() != eStateSuspended &&
554 thread_sp->GetCurrentPlan()->StopOthers()) {
555 if (thread_sp->IsOperatingSystemPluginThread() &&
556 !thread_sp->GetBackingThread())
557 continue;
558
559 // You can't say "stop others" and also want yourself to be suspended.
560 assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
561 run_me_only_list.AddThread(thread_sp);
562
563 if (thread_sp == GetSelectedThread())
564 thread_to_run = thread_sp;
565
566 if (thread_sp->ShouldRunBeforePublicStop()) {
567 // This takes precedence, so if we find one of these, service it:
568 thread_to_run = thread_sp;
569 break;
570 }
571 }
572 }
573
574 if (run_me_only_list.GetSize(false) > 0 && !thread_to_run) {
575 if (run_me_only_list.GetSize(false) == 1) {
576 thread_to_run = run_me_only_list.GetThreadAtIndex(0);
577 } else {
578 int random_thread =
579 (int)((run_me_only_list.GetSize(false) * (double)rand()) /
580 (RAND_MAX + 1.0));
581 thread_to_run = run_me_only_list.GetThreadAtIndex(random_thread);
582 }
583 }
584
585 if (thread_to_run != nullptr) {
586 direction = thread_to_run->GetCurrentPlan()->GetDirection();
587 } else {
588 direction = m_process.GetBaseDirection();
589 }
590
591 // Give all the threads that are likely to run a last chance to set up their
592 // state before we negotiate who is actually going to get a chance to run...
593 // Don't set to resume suspended threads, and if any thread wanted to stop
594 // others, only call setup on the threads that request StopOthers...
595 if (thread_to_run != nullptr) {
596 // See if any thread wants to run stopping others. If it does, then we
597 // won't setup the other threads for resume, since they aren't going to get
598 // a chance to run. This is necessary because the SetupForResume might add
599 // "StopOthers" plans which would then get to be part of the who-gets-to-run
600 // negotiation, but they're coming in after the fact, and the threads that
601 // are already set up should take priority.
602 if (thread_to_run->SetupToStepOverBreakpointIfNeeded(direction)) {
603 // We only need to step over breakpoints when running forward, and the
604 // step-over-breakpoint plan itself wants to run forward, so this
605 // keeps our desired direction.
606 assert(thread_to_run->GetCurrentPlan()->GetDirection() == direction);
607 }
608 } else {
609 // Pre-scan to find all threads that need to step over a breakpoint,
610 // and group them by breakpoint address. This optimization allows us to
611 // step multiple threads over the same breakpoint with minimal breakpoint
612 // swaps, only the last thread in each group will re-enable the breakpoint.
613 llvm::DenseMap<lldb::addr_t, llvm::SmallVector<ThreadSP>> breakpoint_groups;
614 bool found_run_before_public_stop = false;
615
616 for (pos = m_threads.begin(); pos != end; ++pos) {
617 ThreadSP thread_sp(*pos);
618 if (thread_sp->GetResumeState() != eStateSuspended) {
619 if (thread_sp->IsOperatingSystemPluginThread() &&
620 !thread_sp->GetBackingThread())
621 continue;
622 if (thread_sp->SetupToStepOverBreakpointIfNeeded(direction)) {
623 // We only need to step over breakpoints when running forward, and the
624 // step-over-breakpoint plan itself wants to run forward, so this
625 // keeps our desired direction.
626 assert(thread_sp->GetCurrentPlan()->GetDirection() == direction);
627 // You can't say "stop others" and also want yourself to be suspended.
628 assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
629
630 // Get the breakpoint address from the step-over-breakpoint plan.
631 ThreadPlan *current_plan = thread_sp->GetCurrentPlan();
632 if (current_plan &&
633 current_plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) {
635 static_cast<ThreadPlanStepOverBreakpoint *>(current_plan);
636 lldb::addr_t bp_addr = bp_plan->GetBreakpointLoadAddress();
637 breakpoint_groups[bp_addr].push_back(thread_sp);
638 }
639
640 thread_to_run = thread_sp;
641 if (thread_sp->ShouldRunBeforePublicStop()) {
642 // This takes precedence, so if we find one of these, service it:
643 found_run_before_public_stop = true;
644 break;
645 }
646 }
647 }
648 }
649
650 // Only apply batching optimization if we have a complete picture of
651 // breakpoint groups. If a ShouldRunBeforePublicStop thread caused the
652 // scan to exit early, the groups are incomplete and the priority thread
653 // must run solo. Deferred state will be cleaned up on next WillResume().
654 if (!found_run_before_public_stop) {
655 // For each group of threads at the same breakpoint, register them with
656 // ThreadList and set them to use deferred re-enable. The breakpoint will
657 // only be re-enabled when ALL threads have finished stepping over it.
658 // Also collect threads for batched vCont if multiple threads at same BP.
659 for (auto &group : breakpoint_groups) {
660 lldb::addr_t bp_addr = group.first;
661 llvm::SmallVector<ThreadSP> &threads = group.second;
662
663 if (threads.size() > 1) {
664 // Use tracking since multiple threads are stepping over the same
665 // breakpoint.
666 for (ThreadSP &thread_sp : threads) {
667 // Register this thread as stepping over the breakpoint.
668 RegisterThreadSteppingOverBreakpoint(bp_addr, thread_sp->GetID());
669
670 // Set the plan to defer re-enabling (use callback instead).
671 ThreadPlan *plan = thread_sp->GetCurrentPlan();
672 // Verify the plan is actually a StepOverBreakpoint plan.
673 if (plan &&
676 static_cast<ThreadPlanStepOverBreakpoint *>(plan);
677 bp_plan->SetDeferReenableBreakpointSite(true);
678 }
679 }
680
681 // Pick the largest group for batched vCont.
682 if (threads.size() > batched_step_threads.size())
683 batched_step_threads = threads;
684 }
685 // Keeps default behavior for a single thread at breakpoint.
686 }
687
688 // If we found a batch, use the first thread as thread_to_run.
689 if (!batched_step_threads.empty())
690 thread_to_run = batched_step_threads[0];
691 }
692 }
693
694 if (thread_to_run != nullptr) {
695 Log *log = GetLog(LLDBLog::Step);
696 if (log && log->GetVerbose())
697 LLDB_LOGF(log, "Turning on notification of new threads while single "
698 "stepping a thread.");
699 m_process.StartNoticingNewThreads();
700 } else {
701 Log *log = GetLog(LLDBLog::Step);
702 if (log && log->GetVerbose())
703 LLDB_LOGF(log, "Turning off notification of new threads while single "
704 "stepping a thread.");
705 m_process.StopNoticingNewThreads();
706 }
707
708 bool need_to_resume = true;
709
710 if (!batched_step_threads.empty()) {
711 // Batched stepping: all threads in the batch step together,
712 // all other threads stay suspended.
713 llvm::DenseSet<lldb::tid_t> batch_tids;
714 for (ThreadSP &thread_sp : batched_step_threads)
715 batch_tids.insert(thread_sp->GetID());
716
717 for (const auto &thread_sp : m_threads) {
718 if (batch_tids.count(thread_sp->GetID()) > 0) {
719 // This thread is in the batch, let it step.
720 if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
721 need_to_resume = false;
722 } else {
723 // Suspend it since it's not in the batch.
724 thread_sp->ShouldResume(eStateSuspended);
725 }
726 }
727 } else if (thread_to_run == nullptr) {
728 // Everybody runs as they wish:
729 for (pos = m_threads.begin(); pos != end; ++pos) {
730 ThreadSP thread_sp(*pos);
731 StateType run_state;
732 if (thread_sp->GetResumeState() != eStateSuspended)
733 run_state = thread_sp->GetCurrentPlan()->RunState();
734 else
735 run_state = eStateSuspended;
736 if (!thread_sp->ShouldResume(run_state))
737 need_to_resume = false;
738 }
739 if (need_to_resume) {
740 // Ensure all threads are running in the right direction
741 for (pos = m_threads.begin(); pos != end; ++pos) {
742 ThreadSP thread_sp(*pos);
743 while (thread_sp->GetCurrentPlan()->GetDirection() != direction) {
744 // This can't discard the base plan because its direction is
745 // m_process.GetBaseDirection() i.e. `direction`.
746 thread_sp->DiscardPlan();
747 }
748 }
749 }
750 } else {
751 for (pos = m_threads.begin(); pos != end; ++pos) {
752 ThreadSP thread_sp(*pos);
753 if (thread_sp == thread_to_run) {
754 // Note, a thread might be able to fulfil it's plan w/o actually
755 // resuming. An example of this is a step that changes the current
756 // inlined function depth w/o moving the PC. Check that here:
757 if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
758 need_to_resume = false;
759 } else
760 thread_sp->ShouldResume(eStateSuspended);
761 }
762 }
763
764 return need_to_resume;
765}
766
768 std::lock_guard<std::recursive_mutex> guard(GetMutex());
769 collection::iterator pos, end = m_threads.end();
770 for (pos = m_threads.begin(); pos != end; ++pos) {
771 // Don't clear out threads that aren't going to get a chance to run, rather
772 // leave their state for the next time around.
773 ThreadSP thread_sp(*pos);
774 if (thread_sp->GetTemporaryResumeState() != eStateSuspended)
775 thread_sp->DidResume();
776 }
777}
778
780 std::lock_guard<std::recursive_mutex> guard(GetMutex());
781 collection::iterator pos, end = m_threads.end();
782 for (pos = m_threads.begin(); pos != end; ++pos) {
783 // Notify threads that the process just stopped. Note, this currently
784 // assumes that all threads in the list stop when the process stops. In
785 // the future we will want to support a debugging model where some threads
786 // continue to run while others are stopped. We either need to handle that
787 // somehow here or create a special thread list containing only threads
788 // which will stop in the code that calls this method (currently
789 // Process::SetPrivateState).
790 ThreadSP thread_sp(*pos);
791 if (StateIsRunningState(thread_sp->GetState()))
792 thread_sp->DidStop();
793 }
794}
795
797 std::lock_guard<std::recursive_mutex> guard(GetMutex());
799 if (!thread_sp.get()) {
800 if (m_threads.size() == 0)
801 return thread_sp;
802 m_selected_tid = m_threads[0]->GetID();
803 thread_sp = m_threads[0];
804 }
805 return thread_sp;
806}
807
809 std::lock_guard<std::recursive_mutex> guard(GetMutex());
810 ThreadSP selected_thread_sp(FindThreadByID(tid));
811 if (selected_thread_sp) {
812 m_selected_tid = tid;
813 selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
814 } else
816
817 if (notify)
819
821}
822
823bool ThreadList::SetSelectedThreadByIndexID(uint32_t index_id, bool notify) {
824 std::lock_guard<std::recursive_mutex> guard(GetMutex());
825 ThreadSP selected_thread_sp(FindThreadByIndexID(index_id));
826 if (selected_thread_sp.get()) {
827 m_selected_tid = selected_thread_sp->GetID();
828 selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
829 } else
831
832 if (notify)
834
836}
837
839 ThreadSP selected_thread_sp(FindThreadByID(tid));
840 if (selected_thread_sp->EventTypeHasListeners(
842 auto data_sp =
843 std::make_shared<Thread::ThreadEventData>(selected_thread_sp);
844 selected_thread_sp->BroadcastEvent(Thread::eBroadcastBitThreadSelected,
845 data_sp);
846 }
847}
848
850 if (this != &rhs) {
851 // We only allow assignments between thread lists describing the same
852 // process. Same process implies same mutex, which means it's enough to lock
853 // just the current object.
854 assert(&m_process == &rhs.m_process);
855 assert(&GetMutex() == &rhs.GetMutex());
856 std::lock_guard<std::recursive_mutex> guard(GetMutex());
857
858 m_stop_id = rhs.m_stop_id;
859 m_threads.swap(rhs.m_threads);
861
862 // Now we look for threads that we are done with and make sure to clear
863 // them up as much as possible so anyone with a shared pointer will still
864 // have a reference, but the thread won't be of much use. Using
865 // std::weak_ptr for all backward references (such as a thread to a
866 // process) will eventually solve this issue for us, but for now, we need
867 // to work around the issue
868 collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
869 for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
870 // If this thread has already been destroyed, we don't need to look for
871 // it to destroy it again.
872 if (!(*rhs_pos)->IsValid())
873 continue;
874
875 const lldb::tid_t tid = (*rhs_pos)->GetID();
876 bool thread_is_alive = false;
877 const uint32_t num_threads = m_threads.size();
878 for (uint32_t idx = 0; idx < num_threads; ++idx) {
879 ThreadSP backing_thread = m_threads[idx]->GetBackingThread();
880 if (m_threads[idx]->GetID() == tid ||
881 (backing_thread && backing_thread->GetID() == tid)) {
882 thread_is_alive = true;
883 break;
884 }
885 }
886 if (!thread_is_alive) {
887 (*rhs_pos)->DestroyThread();
888 }
889 }
890 }
891}
892
894 std::lock_guard<std::recursive_mutex> guard(GetMutex());
895 collection::iterator pos, end = m_threads.end();
896 for (pos = m_threads.begin(); pos != end; ++pos)
897 (*pos)->Flush();
898}
899
900std::recursive_mutex &ThreadList::GetMutex() const {
901 return m_process.m_thread_mutex;
902}
903
905 lldb::ThreadSP thread_sp)
907 if (thread_sp) {
908 m_tid = thread_sp->GetID();
909 m_thread_list = &thread_sp->GetProcess()->GetThreadList();
910 m_thread_list->PushExpressionExecutionThread(m_tid);
911 }
912}
913
915 tid_t tid) {
916 std::lock_guard<std::recursive_mutex> guard(GetMutex());
917 m_threads_stepping_over_bp[breakpoint_addr].insert(tid);
918
919 Log *log = GetLog(LLDBLog::Step);
920 LLDB_LOGF(
921 log,
922 "ThreadList::%s: Registered thread 0x%" PRIx64
923 " stepping over breakpoint at 0x%" PRIx64 " (now %zu threads)",
924 __FUNCTION__, tid, breakpoint_addr,
925 static_cast<size_t>(m_threads_stepping_over_bp[breakpoint_addr].size()));
926}
927
929 tid_t tid) {
930 std::lock_guard<std::recursive_mutex> guard(GetMutex());
931
932 Log *log = GetLog(LLDBLog::Step);
933
934 auto it = m_threads_stepping_over_bp.find(breakpoint_addr);
935 if (it == m_threads_stepping_over_bp.end()) {
936 // No threads registered for this breakpoint, re-enable directly.
937 LLDB_LOGF(log,
938 "ThreadList::%s: Thread 0x%" PRIx64
939 " finished stepping over breakpoint at 0x%" PRIx64
940 " but no threads were registered, re-enabling directly",
941 __FUNCTION__, tid, breakpoint_addr);
942 if (BreakpointSiteSP bp_site_sp =
943 m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr))
944 m_process.EnableBreakpointSite(bp_site_sp.get());
945 return;
946 }
947
948 // Remove this thread from the set.
949 it->second.erase(tid);
950
951 LLDB_LOGF(log,
952 "ThreadList::%s: Thread 0x%" PRIx64
953 " finished stepping over breakpoint at 0x%" PRIx64
954 " (%zu threads remaining)",
955 __FUNCTION__, tid, breakpoint_addr,
956 static_cast<size_t>(it->second.size()));
957
958 // If no more threads are stepping over this breakpoint, re-enable it.
959 if (it->second.empty()) {
960 LLDB_LOGF(log,
961 "ThreadList::%s: All threads finished stepping over breakpoint "
962 "at 0x%" PRIx64 ", re-enabling breakpoint",
963 __FUNCTION__, breakpoint_addr);
964
965 if (BreakpointSiteSP bp_site_sp =
966 m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr))
967 m_process.EnableBreakpointSite(bp_site_sp.get());
968
969 // Clean up the entry.
971 }
972}
#define LLDB_LOG(log,...)
The LLDB_LOG* macros defined below are the way to emit log messages.
Definition Log.h:369
#define LLDB_LOGF(log,...)
Definition Log.h:376
void PutCString(const char *cstr)
Definition Log.cpp:145
bool GetVerbose() const
Definition Log.cpp:326
static bool GetInterruptedFromEvent(const Event *event_ptr)
Definition Process.cpp:4591
std::vector< lldb::ThreadSP > collection
void AddThread(const lldb::ThreadSP &thread_sp)
ExpressionExecutionThreadPusher(ThreadList &thread_list, lldb::tid_t tid)
Definition ThreadList.h:53
lldb::ThreadSP RemoveThreadByID(lldb::tid_t tid, bool can_update=true)
lldb::ThreadSP GetSelectedThread()
bool ShouldStop(Event *event_ptr)
Vote ShouldReportStop(Event *event_ptr)
uint32_t GetStopID() const
void ThreadFinishedSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, lldb::tid_t tid)
Called by ThreadPlanStepOverBreakpoint when a thread finishes stepping over a breakpoint.
bool SetSelectedThreadByIndexID(uint32_t index_id, bool notify=false)
lldb::ThreadSP FindThreadByProtocolID(lldb::tid_t tid, bool can_update=true)
uint32_t GetSize(bool can_update=true)
void PopExpressionExecutionThread(lldb::tid_t tid)
bool WillResume(lldb::RunDirection &direction)
The thread list tells all the threads it is about to resume.
bool SetSelectedThreadByID(lldb::tid_t tid, bool notify=false)
lldb::ThreadSP FindThreadByIndexID(uint32_t index_id, bool can_update=true)
Vote ShouldReportRun(Event *event_ptr)
void SetStopID(uint32_t stop_id)
lldb::ThreadSP GetThreadSPForThreadPtr(Thread *thread_ptr)
lldb::ThreadSP GetThreadAtIndex(uint32_t idx, bool can_update=true)
uint32_t m_stop_id
The process stop ID that this thread list is valid for.
Definition ThreadList.h:168
std::recursive_mutex & GetMutex() const override
lldb::ThreadSP FindThreadByID(lldb::tid_t tid, bool can_update=true)
lldb::ThreadSP GetExpressionExecutionThread()
const ThreadList & operator=(const ThreadList &rhs)
Precondition: both thread lists must belong to the same process.
void RegisterThreadSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, lldb::tid_t tid)
Register a thread that is about to step over a breakpoint.
void Update(ThreadList &rhs)
Precondition: both thread lists must belong to the same process.
void PushExpressionExecutionThread(lldb::tid_t tid)
void SetShouldReportStop(Vote vote)
lldb::tid_t m_selected_tid
For targets that need the notion of a current thread.
Definition ThreadList.h:170
void NotifySelectedThreadChanged(lldb::tid_t tid)
ThreadList(Process &process)
lldb::ThreadSP RemoveThreadByProtocolID(lldb::tid_t tid, bool can_update=true)
llvm::DenseMap< lldb::addr_t, llvm::DenseSet< lldb::tid_t > > m_threads_stepping_over_bp
Tracks which threads are currently stepping over each breakpoint address.
Definition ThreadList.h:178
Process & m_process
The process that manages this thread list.
Definition ThreadList.h:166
std::vector< lldb::tid_t > m_expression_tid_stack
Definition ThreadList.h:171
void SetDeferReenableBreakpointSite(bool defer)
When set to true, the breakpoint site will NOT be re-enabled directly by this plan.
ThreadPlanKind GetKind() const
Definition ThreadPlan.h:446
@ eBroadcastBitThreadSelected
Definition Thread.h:79
#define LLDB_INVALID_THREAD_ID
A class that represents a running process on the host machine.
Log * GetLog(Cat mask)
Retrieve the Log object for the channel associated with the given log enum.
Definition Log.h:332
bool StateIsRunningState(lldb::StateType state)
Check if a state represents a state where the process or thread is running.
Definition State.cpp:68
std::shared_ptr< lldb_private::BreakpointSite > BreakpointSiteSP
RunDirection
Execution directions.
std::shared_ptr< lldb_private::Thread > ThreadSP
StateType
Process and Thread States.
@ eStateSuspended
Process or thread is in a suspended state as far as the debugger is concerned while other processes o...
uint64_t addr_t
Definition lldb-types.h:80
uint64_t tid_t
Definition lldb-types.h:84