LLDB mainline
ThreadList.cpp
Go to the documentation of this file.
1//===-- ThreadList.cpp ----------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include <cstdlib>
10
11#include <algorithm>
12
13#include "lldb/Target/Process.h"
15#include "lldb/Target/Thread.h"
21#include "lldb/Utility/Log.h"
22#include "lldb/Utility/State.h"
23#include "llvm/ADT/DenseSet.h"
24#include "llvm/ADT/SmallVector.h"
25
26using namespace lldb;
27using namespace lldb_private;
28
32
36 // Use the assignment operator since it uses the mutex
37 *this = rhs;
38}
39
41 if (this != &rhs) {
42 // We only allow assignments between thread lists describing the same
43 // process. Same process implies same mutex, which means it's enough to lock
44 // just the current object.
45 assert(&m_process == &rhs.m_process);
46 assert(&GetMutex() == &rhs.GetMutex());
47 std::lock_guard<std::recursive_mutex> guard(GetMutex());
48
49 m_stop_id = rhs.m_stop_id;
50 m_threads = rhs.m_threads;
52 }
53 return *this;
54}
55
57 // Clear the thread list. Clear will take the mutex lock which will ensure
58 // that if anyone is using the list they won't get it removed while using it.
59 Clear();
60}
61
63 if (m_expression_tid_stack.empty())
64 return GetSelectedThread();
65 ThreadSP expr_thread_sp = FindThreadByID(m_expression_tid_stack.back());
66 if (expr_thread_sp)
67 return expr_thread_sp;
68 else
69 return GetSelectedThread();
70}
71
75
80
81uint32_t ThreadList::GetStopID() const { return m_stop_id; }
82
83void ThreadList::SetStopID(uint32_t stop_id) { m_stop_id = stop_id; }
84
85uint32_t ThreadList::GetSize(bool can_update) {
86 std::lock_guard<std::recursive_mutex> guard(GetMutex());
87
88 if (can_update)
89 m_process.UpdateThreadListIfNeeded();
90 return m_threads.size();
91}
92
93ThreadSP ThreadList::GetThreadAtIndex(uint32_t idx, bool can_update) {
94 std::lock_guard<std::recursive_mutex> guard(GetMutex());
95
96 if (can_update)
97 m_process.UpdateThreadListIfNeeded();
98
99 ThreadSP thread_sp;
100 if (idx < m_threads.size())
101 thread_sp = m_threads[idx];
102 return thread_sp;
103}
104
106 std::lock_guard<std::recursive_mutex> guard(GetMutex());
107
108 if (can_update)
109 m_process.UpdateThreadListIfNeeded();
110
111 ThreadSP thread_sp;
112 uint32_t idx = 0;
113 const uint32_t num_threads = m_threads.size();
114 for (idx = 0; idx < num_threads; ++idx) {
115 if (m_threads[idx]->GetID() == tid) {
116 thread_sp = m_threads[idx];
117 break;
118 }
119 }
120 return thread_sp;
121}
122
124 std::lock_guard<std::recursive_mutex> guard(GetMutex());
125
126 if (can_update)
127 m_process.UpdateThreadListIfNeeded();
128
129 ThreadSP thread_sp;
130 uint32_t idx = 0;
131 const uint32_t num_threads = m_threads.size();
132 for (idx = 0; idx < num_threads; ++idx) {
133 if (m_threads[idx]->GetProtocolID() == tid) {
134 thread_sp = m_threads[idx];
135 break;
136 }
137 }
138 return thread_sp;
139}
140
142 std::lock_guard<std::recursive_mutex> guard(GetMutex());
143
144 if (can_update)
145 m_process.UpdateThreadListIfNeeded();
146
147 ThreadSP thread_sp;
148 uint32_t idx = 0;
149 const uint32_t num_threads = m_threads.size();
150 for (idx = 0; idx < num_threads; ++idx) {
151 if (m_threads[idx]->GetID() == tid) {
152 thread_sp = m_threads[idx];
153 m_threads.erase(m_threads.begin() + idx);
154 break;
155 }
156 }
157 return thread_sp;
158}
159
161 bool can_update) {
162 std::lock_guard<std::recursive_mutex> guard(GetMutex());
163
164 if (can_update)
165 m_process.UpdateThreadListIfNeeded();
166
167 ThreadSP thread_sp;
168 uint32_t idx = 0;
169 const uint32_t num_threads = m_threads.size();
170 for (idx = 0; idx < num_threads; ++idx) {
171 if (m_threads[idx]->GetProtocolID() == tid) {
172 thread_sp = m_threads[idx];
173 m_threads.erase(m_threads.begin() + idx);
174 break;
175 }
176 }
177 return thread_sp;
178}
179
181 ThreadSP thread_sp;
182 if (thread_ptr) {
183 std::lock_guard<std::recursive_mutex> guard(GetMutex());
184
185 uint32_t idx = 0;
186 const uint32_t num_threads = m_threads.size();
187 for (idx = 0; idx < num_threads; ++idx) {
188 if (m_threads[idx].get() == thread_ptr) {
189 thread_sp = m_threads[idx];
190 break;
191 }
192 }
193 }
194 return thread_sp;
195}
196
197ThreadSP ThreadList::FindThreadByIndexID(uint32_t index_id, bool can_update) {
198 std::lock_guard<std::recursive_mutex> guard(GetMutex());
199
200 if (can_update)
201 m_process.UpdateThreadListIfNeeded();
202
203 ThreadSP thread_sp;
204 const uint32_t num_threads = m_threads.size();
205 for (uint32_t idx = 0; idx < num_threads; ++idx) {
206 if (m_threads[idx]->GetIndexID() == index_id) {
207 thread_sp = m_threads[idx];
208 break;
209 }
210 }
211 return thread_sp;
212}
213
215 // Running events should never stop, obviously...
216
217 Log *log = GetLog(LLDBLog::Step);
218
219 // The ShouldStop method of the threads can do a whole lot of work, figuring
220 // out whether the thread plan conditions are met. So we don't want to keep
221 // the ThreadList locked the whole time we are doing this.
222 // FIXME: It is possible that running code could cause new threads
223 // to be created. If that happens, we will miss asking them whether they
224 // should stop. This is not a big deal since we haven't had a chance to hang
225 // any interesting operations on those threads yet.
226
227 collection threads_copy;
228 {
229 // Scope for locker
230 std::lock_guard<std::recursive_mutex> guard(GetMutex());
231
232 m_process.UpdateThreadListIfNeeded();
233 for (lldb::ThreadSP thread_sp : m_threads) {
234 // This is an optimization... If we didn't let a thread run in between
235 // the previous stop and this one, we shouldn't have to consult it for
236 // ShouldStop. So just leave it off the list we are going to inspect.
237 // If the thread didn't run but had work to do before declaring a public
238 // stop, then also include it.
239 // On Linux, if a thread-specific conditional breakpoint was hit, it won't
240 // necessarily be the thread that hit the breakpoint itself that
241 // evaluates the conditional expression, so the thread that hit the
242 // breakpoint could still be asked to stop, even though it hasn't been
243 // allowed to run since the previous stop.
244 if (thread_sp->GetTemporaryResumeState() != eStateSuspended ||
245 thread_sp->IsStillAtLastBreakpointHit()
246 || thread_sp->ShouldRunBeforePublicStop())
247 threads_copy.push_back(thread_sp);
248 }
249
250 // It is possible the threads we were allowing to run all exited and then
251 // maybe the user interrupted or something, then fall back on looking at
252 // all threads:
253
254 if (threads_copy.size() == 0)
255 threads_copy = m_threads;
256 }
257
258 collection::iterator pos, end = threads_copy.end();
259
260 if (log) {
261 log->PutCString("");
262 LLDB_LOGF(log,
263 "ThreadList::%s: %" PRIu64 " threads, %" PRIu64
264 " unsuspended threads",
265 __FUNCTION__, (uint64_t)m_threads.size(),
266 (uint64_t)threads_copy.size());
267 }
268
269 bool did_anybody_stop_for_a_reason = false;
270
271 // If the event is an Interrupt event, then we're going to stop no matter
272 // what. Otherwise, presume we won't stop.
273 bool should_stop = false;
275 LLDB_LOGF(
276 log, "ThreadList::%s handling interrupt event, should stop set to true",
277 __FUNCTION__);
278
279 should_stop = true;
280 }
281
282 // Now we run through all the threads and get their stop info's. We want to
283 // make sure to do this first before we start running the ShouldStop, because
284 // one thread's ShouldStop could destroy information (like deleting a thread
285 // specific breakpoint another thread had stopped at) which could lead us to
286 // compute the StopInfo incorrectly. We don't need to use it here, we just
287 // want to make sure it gets computed.
288
289 for (pos = threads_copy.begin(); pos != end; ++pos) {
290 ThreadSP thread_sp(*pos);
291 thread_sp->GetStopInfo();
292 }
293
294 // If a thread needs to finish some job that can be done just on this thread
295 // before broadcastion the stop, it will signal that by returning true for
296 // ShouldRunBeforePublicStop. This variable gathers the results from that.
297 bool a_thread_needs_to_run = false;
298 for (pos = threads_copy.begin(); pos != end; ++pos) {
299 ThreadSP thread_sp(*pos);
300
301 // We should never get a stop for which no thread had a stop reason, but
302 // sometimes we do see this - for instance when we first connect to a
303 // remote stub. In that case we should stop, since we can't figure out the
304 // right thing to do and stopping gives the user control over what to do in
305 // this instance.
306 //
307 // Note, this causes a problem when you have a thread specific breakpoint,
308 // and a bunch of threads hit the breakpoint, but not the thread which we
309 // are waiting for. All the threads that are not "supposed" to hit the
310 // breakpoint are marked as having no stop reason, which is right, they
311 // should not show a stop reason. But that triggers this code and causes
312 // us to stop seemingly for no reason.
313 //
314 // Since the only way we ever saw this error was on first attach, I'm only
315 // going to trigger set did_anybody_stop_for_a_reason to true unless this
316 // is the first stop.
317 //
318 // If this becomes a problem, we'll have to have another StopReason like
319 // "StopInfoHidden" which will look invalid everywhere but at this check.
320
321 if (thread_sp->GetProcess()->GetStopID() > 1)
322 did_anybody_stop_for_a_reason = true;
323 else
324 did_anybody_stop_for_a_reason |= thread_sp->ThreadStoppedForAReason();
325
326 const bool thread_should_stop = thread_sp->ShouldStop(event_ptr);
327
328 if (thread_should_stop)
329 should_stop |= true;
330 else {
331 bool this_thread_forces_run = thread_sp->ShouldRunBeforePublicStop();
332 a_thread_needs_to_run |= this_thread_forces_run;
333 if (this_thread_forces_run)
334 LLDB_LOG(log,
335 "ThreadList::{0} thread: {1:x}, "
336 "says it needs to run before public stop.",
337 __FUNCTION__, thread_sp->GetID());
338 }
339 }
340
341 if (a_thread_needs_to_run) {
342 should_stop = false;
343 } else if (!should_stop && !did_anybody_stop_for_a_reason) {
344 should_stop = true;
345 LLDB_LOGF(log,
346 "ThreadList::%s we stopped but no threads had a stop reason, "
347 "overriding should_stop and stopping.",
348 __FUNCTION__);
349 }
350
351 LLDB_LOGF(log, "ThreadList::%s overall should_stop = %i", __FUNCTION__,
352 should_stop);
353
354 if (should_stop) {
355 for (pos = threads_copy.begin(); pos != end; ++pos) {
356 ThreadSP thread_sp(*pos);
357 thread_sp->WillStop();
358 }
359 }
360
361 return should_stop;
362}
363
365 std::lock_guard<std::recursive_mutex> guard(GetMutex());
366
367 Vote result = eVoteNoOpinion;
368 m_process.UpdateThreadListIfNeeded();
369 collection::iterator pos, end = m_threads.end();
370
371 Log *log = GetLog(LLDBLog::Step);
372
373 LLDB_LOGF(log, "ThreadList::%s %" PRIu64 " threads", __FUNCTION__,
374 (uint64_t)m_threads.size());
375
376 // Run through the threads and ask whether we should report this event. For
377 // stopping, a YES vote wins over everything. A NO vote wins over NO
378 // opinion. The exception is if a thread has work it needs to force before
379 // a public stop, which overrides everyone else's opinion:
380 for (pos = m_threads.begin(); pos != end; ++pos) {
381 ThreadSP thread_sp(*pos);
382 if (thread_sp->ShouldRunBeforePublicStop()) {
383 LLDB_LOG(log, "Thread {0:x} has private business to complete, overrode "
384 "the should report stop.", thread_sp->GetID());
385 result = eVoteNo;
386 break;
387 }
388
389 const Vote vote = thread_sp->ShouldReportStop(event_ptr);
390 switch (vote) {
391 case eVoteNoOpinion:
392 continue;
393
394 case eVoteYes:
395 result = eVoteYes;
396 break;
397
398 case eVoteNo:
399 if (result == eVoteNoOpinion) {
400 result = eVoteNo;
401 } else {
402 LLDB_LOG(log,
403 "Thread {0:x} voted {1}, but lost out because result was {2}",
404 thread_sp->GetID(), vote, result);
405 }
406 break;
407 }
408 }
409 LLDB_LOG(log, "Returning {0}", result);
410 return result;
411}
412
414 std::lock_guard<std::recursive_mutex> guard(GetMutex());
415
416 m_process.UpdateThreadListIfNeeded();
417 collection::iterator pos, end = m_threads.end();
418 for (pos = m_threads.begin(); pos != end; ++pos) {
419 ThreadSP thread_sp(*pos);
420 thread_sp->SetShouldReportStop(vote);
421 }
422}
423
425
426 std::lock_guard<std::recursive_mutex> guard(GetMutex());
427
428 Vote result = eVoteNoOpinion;
429 m_process.UpdateThreadListIfNeeded();
430 collection::iterator pos, end = m_threads.end();
431
432 // Run through the threads and ask whether we should report this event. The
433 // rule is NO vote wins over everything, a YES vote wins over no opinion.
434
435 Log *log = GetLog(LLDBLog::Step);
436
437 for (pos = m_threads.begin(); pos != end; ++pos) {
438 if ((*pos)->GetResumeState() != eStateSuspended) {
439 switch ((*pos)->ShouldReportRun(event_ptr)) {
440 case eVoteNoOpinion:
441 continue;
442 case eVoteYes:
443 if (result == eVoteNoOpinion)
444 result = eVoteYes;
445 break;
446 case eVoteNo:
447 LLDB_LOGF(log,
448 "ThreadList::ShouldReportRun() thread %d (0x%4.4" PRIx64
449 ") says don't report.",
450 (*pos)->GetIndexID(), (*pos)->GetID());
451 result = eVoteNo;
452 break;
453 }
454 }
455 }
456 return result;
457}
458
460 std::lock_guard<std::recursive_mutex> guard(GetMutex());
461 m_stop_id = 0;
462 m_threads.clear();
464}
465
467 std::lock_guard<std::recursive_mutex> guard(GetMutex());
468 const uint32_t num_threads = m_threads.size();
469 for (uint32_t idx = 0; idx < num_threads; ++idx) {
470 m_threads[idx]->DestroyThread();
471 }
472}
473
475 std::lock_guard<std::recursive_mutex> guard(GetMutex());
476
477 m_process.UpdateThreadListIfNeeded();
478
481 "Turning off notification of new threads while single stepping "
482 "a thread.");
483
484 collection::iterator pos, end = m_threads.end();
485 for (pos = m_threads.begin(); pos != end; ++pos)
486 (*pos)->RefreshStateAfterStop();
487}
488
490 // You don't need to update the thread list here, because only threads that
491 // you currently know about have any thread plans.
492 std::lock_guard<std::recursive_mutex> guard(GetMutex());
493
494 collection::iterator pos, end = m_threads.end();
495 for (pos = m_threads.begin(); pos != end; ++pos)
496 (*pos)->DiscardThreadPlans(true);
497}
498
  // NOTE(review): the defining line of this function — per the header,
  // `bool ThreadList::WillResume(lldb::RunDirection &direction)` — appears to
  // have been lost in extraction; the body below begins with its leading
  // comment. Confirm against version control.
  // Run through the threads and perform their momentary actions. But we only
  // do this for threads that are running, user suspended threads stay where
  // they are.

  std::lock_guard<std::recursive_mutex> guard(GetMutex());
  m_process.UpdateThreadListIfNeeded();

  collection::iterator pos, end = m_threads.end();

  // Clear tracking state from the previous stop and pop any leftover
  // StepOverBreakpoint plans. This gives us a clean slate: plans will be
  // recreated fresh by SetupToStepOverBreakpointIfNeeded below, and the
  // batching logic will recompute deferred state from scratch.
  for (const auto &thread_sp : m_threads) {
    ThreadPlan *plan = thread_sp->GetCurrentPlan();
    if (plan && plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) {
      auto *bp_plan = static_cast<ThreadPlanStepOverBreakpoint *>(plan);
      // Only pop plans created by our batching logic (deferred plans).
      // Plans from the single-thread path must not be popped, as doing so
      // would change the StopOthers scan result and cause other threads
      // to lose their breakpoint stop reason.
      if (bp_plan->GetDeferReenableBreakpointSite()) {
        // Suppress the re-enable side effect in DidPop(), the breakpoint
        // may still be disabled from the previous batch, and we don't want
        // to toggle it. The new plans will handle re-enable correctly.
        bp_plan->SetReenabledBreakpointSite();
        thread_sp->DiscardPlan();
      }
    }
  }

  // Go through the threads and see if any thread wants to run just itself.
  // if so then pick one and run it.

  // Collect threads for batched vCont for multiple threads at the same
  // breakpoint.
  llvm::SmallVector<ThreadSP> batched_step_threads;

  // Scratch ThreadList used purely as a container for "stop others" threads.
  ThreadList run_me_only_list(m_process);

  run_me_only_list.SetStopID(m_process.GetStopID());

  // One or more threads might want to "Stop Others". We want to handle all
  // those requests first. And if there is a thread that wanted to "resume
  // before a public stop", let it get the first crack:
  // There are two special kinds of thread that have priority for "StopOthers":
  // a "ShouldRunBeforePublicStop thread, or the currently selected thread. If
  // we find one satisfying that critereon, put it here.
  ThreadSP thread_to_run;
  for (pos = m_threads.begin(); pos != end; ++pos) {
    ThreadSP thread_sp(*pos);
    if (thread_sp->GetResumeState() != eStateSuspended &&
        thread_sp->GetCurrentPlan()->StopOthers()) {
      if (thread_sp->IsOperatingSystemPluginThread() &&
          !thread_sp->GetBackingThread())
        continue;

      // You can't say "stop others" and also want yourself to be suspended.
      assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);
      run_me_only_list.AddThread(thread_sp);

      if (thread_sp == GetSelectedThread())
        thread_to_run = thread_sp;

      if (thread_sp->ShouldRunBeforePublicStop()) {
        // This takes precedence, so if we find one of these, service it:
        thread_to_run = thread_sp;
        break;
      }
    }
  }

  // No priority thread was found: pick one of the "stop others" candidates at
  // random so no single thread starves the rest across repeated resumes.
  if (run_me_only_list.GetSize(false) > 0 && !thread_to_run) {
    if (run_me_only_list.GetSize(false) == 1) {
      thread_to_run = run_me_only_list.GetThreadAtIndex(0);
    } else {
      int random_thread =
          (int)((run_me_only_list.GetSize(false) * (double)rand()) /
                (RAND_MAX + 1.0));
      thread_to_run = run_me_only_list.GetThreadAtIndex(random_thread);
    }
  }

  // The run direction (out-parameter) follows the solo thread's plan if there
  // is one, otherwise the process-wide base direction.
  if (thread_to_run != nullptr) {
    direction = thread_to_run->GetCurrentPlan()->GetDirection();
  } else {
    direction = m_process.GetBaseDirection();
  }

  // Give all the threads that are likely to run a last chance to set up their
  // state before we negotiate who is actually going to get a chance to run...
  // Don't set to resume suspended threads, and if any thread wanted to stop
  // others, only call setup on the threads that request StopOthers...
  if (thread_to_run != nullptr) {
    // See if any thread wants to run stopping others. If it does, then we
    // won't setup the other threads for resume, since they aren't going to get
    // a chance to run. This is necessary because the SetupForResume might add
    // "StopOthers" plans which would then get to be part of the who-gets-to-run
    // negotiation, but they're coming in after the fact, and the threads that
    // are already set up should take priority.
    if (thread_to_run->SetupToStepOverBreakpointIfNeeded(direction)) {
      // We only need to step over breakpoints when running forward, and the
      // step-over-breakpoint plan itself wants to run forward, so this
      // keeps our desired direction.
      assert(thread_to_run->GetCurrentPlan()->GetDirection() == direction);
    }
  } else {
    // Pre-scan to find all threads that need to step over a breakpoint,
    // and group them by breakpoint address. This optimization allows us to
    // step multiple threads over the same breakpoint with minimal breakpoint
    // swaps, only the last thread in each group will re-enable the breakpoint.
    llvm::DenseMap<lldb::addr_t, llvm::SmallVector<ThreadSP>> breakpoint_groups;
    bool found_run_before_public_stop = false;

    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp->GetResumeState() != eStateSuspended) {
        if (thread_sp->IsOperatingSystemPluginThread() &&
            !thread_sp->GetBackingThread())
          continue;
        if (thread_sp->SetupToStepOverBreakpointIfNeeded(direction)) {
          // We only need to step over breakpoints when running forward, and the
          // step-over-breakpoint plan itself wants to run forward, so this
          // keeps our desired direction.
          assert(thread_sp->GetCurrentPlan()->GetDirection() == direction);
          // You can't say "stop others" and also want yourself to be suspended.
          assert(thread_sp->GetCurrentPlan()->RunState() != eStateSuspended);

          // Get the breakpoint address from the step-over-breakpoint plan.
          ThreadPlan *current_plan = thread_sp->GetCurrentPlan();
          if (current_plan &&
              current_plan->GetKind() == ThreadPlan::eKindStepOverBreakpoint) {
            // NOTE(review): a declarator line (presumably
            // "ThreadPlanStepOverBreakpoint *bp_plan =") appears to be
            // missing here — extraction artifact, confirm against VCS.
                static_cast<ThreadPlanStepOverBreakpoint *>(current_plan);
            lldb::addr_t bp_addr = bp_plan->GetBreakpointLoadAddress();
            breakpoint_groups[bp_addr].push_back(thread_sp);
          }

          thread_to_run = thread_sp;
          if (thread_sp->ShouldRunBeforePublicStop()) {
            // This takes precedence, so if we find one of these, service it:
            found_run_before_public_stop = true;
            break;
          }
        }
      }
    }

    // Only apply batching optimization if we have a complete picture of
    // breakpoint groups. If a ShouldRunBeforePublicStop thread caused the
    // scan to exit early, the groups are incomplete and the priority thread
    // must run solo. Deferred state will be cleaned up on next WillResume().
    if (!found_run_before_public_stop) {
      // For each group of threads at the same breakpoint, register them with
      // ThreadList and set them to use deferred re-enable. The breakpoint will
      // only be re-enabled when ALL threads have finished stepping over it.
      // Also collect threads for batched vCont if multiple threads at same BP.
      for (auto &group : breakpoint_groups) {
        lldb::addr_t bp_addr = group.first;
        llvm::SmallVector<ThreadSP> &threads = group.second;

        if (threads.size() > 1) {
          // Use tracking since multiple threads are stepping over the same
          // breakpoint.
          for (ThreadSP &thread_sp : threads) {
            // Register this thread as stepping over the breakpoint.
            RegisterThreadSteppingOverBreakpoint(bp_addr, thread_sp->GetID());

            // Set the plan to defer re-enabling (use callback instead).
            ThreadPlan *plan = thread_sp->GetCurrentPlan();
            // Verify the plan is actually a StepOverBreakpoint plan.
            if (plan &&
                // NOTE(review): the GetKind() comparison and a "bp_plan"
                // declarator line appear to be missing here — extraction
                // artifact, confirm against VCS.
                    static_cast<ThreadPlanStepOverBreakpoint *>(plan);
              bp_plan->SetDeferReenableBreakpointSite(true);
            }
          }

          // Pick the largest group for batched vCont.
          if (threads.size() > batched_step_threads.size())
            batched_step_threads = threads;
        }
        // Keeps default behavior for a single thread at breakpoint.
      }

      // If we found a batch, use the first thread as thread_to_run.
      if (!batched_step_threads.empty())
        thread_to_run = batched_step_threads[0];
    }
  }

  if (thread_to_run != nullptr) {
    // NOTE(review): the LLDB_LOGF/LLDB_LOGF_VERBOSE header lines for these
    // two log statements appear to be missing — extraction artifact.
                 "Turning on notification of new threads while single "
                 "stepping a thread.");
    m_process.StartNoticingNewThreads();
  } else {
                 "Turning off notification of new threads while single "
                 "stepping a thread.");
    m_process.StopNoticingNewThreads();
  }

  bool need_to_resume = true;

  if (!batched_step_threads.empty()) {
    // Batched stepping: all threads in the batch step together,
    // all other threads stay suspended.
    llvm::DenseSet<lldb::tid_t> batch_tids;
    for (ThreadSP &thread_sp : batched_step_threads)
      batch_tids.insert(thread_sp->GetID());

    for (const auto &thread_sp : m_threads) {
      if (batch_tids.count(thread_sp->GetID()) > 0) {
        // This thread is in the batch, let it step.
        if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
          need_to_resume = false;
      } else {
        // Suspend it since it's not in the batch.
        thread_sp->ShouldResume(eStateSuspended);
      }
    }
  } else if (thread_to_run == nullptr) {
    // Everybody runs as they wish:
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      StateType run_state;
      if (thread_sp->GetResumeState() != eStateSuspended)
        run_state = thread_sp->GetCurrentPlan()->RunState();
      else
        run_state = eStateSuspended;
      if (!thread_sp->ShouldResume(run_state))
        need_to_resume = false;
    }
    if (need_to_resume) {
      // Ensure all threads are running in the right direction
      for (pos = m_threads.begin(); pos != end; ++pos) {
        ThreadSP thread_sp(*pos);
        while (thread_sp->GetCurrentPlan()->GetDirection() != direction) {
          // This can't discard the base plan because its direction is
          // m_process.GetBaseDirection() i.e. `direction`.
          thread_sp->DiscardPlan();
        }
      }
    }
  } else {
    for (pos = m_threads.begin(); pos != end; ++pos) {
      ThreadSP thread_sp(*pos);
      if (thread_sp == thread_to_run) {
        // Note, a thread might be able to fulfil it's plan w/o actually
        // resuming. An example of this is a step that changes the current
        // inlined function depth w/o moving the PC. Check that here:
        if (!thread_sp->ShouldResume(thread_sp->GetCurrentPlan()->RunState()))
          need_to_resume = false;
      } else
        thread_sp->ShouldResume(eStateSuspended);
    }
  }

  return need_to_resume;
}
763
765 std::lock_guard<std::recursive_mutex> guard(GetMutex());
766 collection::iterator pos, end = m_threads.end();
767 for (pos = m_threads.begin(); pos != end; ++pos) {
768 // Don't clear out threads that aren't going to get a chance to run, rather
769 // leave their state for the next time around.
770 ThreadSP thread_sp(*pos);
771 if (thread_sp->GetTemporaryResumeState() != eStateSuspended)
772 thread_sp->DidResume();
773 }
774}
775
777 std::lock_guard<std::recursive_mutex> guard(GetMutex());
778 collection::iterator pos, end = m_threads.end();
779 for (pos = m_threads.begin(); pos != end; ++pos) {
780 // Notify threads that the process just stopped. Note, this currently
781 // assumes that all threads in the list stop when the process stops. In
782 // the future we will want to support a debugging model where some threads
783 // continue to run while others are stopped. We either need to handle that
784 // somehow here or create a special thread list containing only threads
785 // which will stop in the code that calls this method (currently
786 // Process::SetPrivateState).
787 ThreadSP thread_sp(*pos);
788 if (StateIsRunningState(thread_sp->GetState()))
789 thread_sp->DidStop();
790 }
791}
792
794 std::lock_guard<std::recursive_mutex> guard(GetMutex());
796 if (!thread_sp.get()) {
797 if (m_threads.size() == 0)
798 return thread_sp;
799 m_selected_tid = m_threads[0]->GetID();
800 thread_sp = m_threads[0];
801 }
802 return thread_sp;
803}
804
806 std::lock_guard<std::recursive_mutex> guard(GetMutex());
807 ThreadSP selected_thread_sp(FindThreadByID(tid));
808 if (selected_thread_sp) {
809 m_selected_tid = tid;
810 selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
811 } else
813
814 if (notify)
816
818}
819
820bool ThreadList::SetSelectedThreadByIndexID(uint32_t index_id, bool notify) {
821 std::lock_guard<std::recursive_mutex> guard(GetMutex());
822 ThreadSP selected_thread_sp(FindThreadByIndexID(index_id));
823 if (selected_thread_sp.get()) {
824 m_selected_tid = selected_thread_sp->GetID();
825 selected_thread_sp->SetDefaultFileAndLineToSelectedFrame();
826 } else
828
829 if (notify)
831
833}
834
836 ThreadSP selected_thread_sp(FindThreadByID(tid));
837 if (selected_thread_sp->EventTypeHasListeners(
839 auto data_sp =
840 std::make_shared<Thread::ThreadEventData>(selected_thread_sp);
841 selected_thread_sp->BroadcastEvent(Thread::eBroadcastBitThreadSelected,
842 data_sp);
843 }
844}
845
847 if (this != &rhs) {
848 // We only allow assignments between thread lists describing the same
849 // process. Same process implies same mutex, which means it's enough to lock
850 // just the current object.
851 assert(&m_process == &rhs.m_process);
852 assert(&GetMutex() == &rhs.GetMutex());
853 std::lock_guard<std::recursive_mutex> guard(GetMutex());
854
855 m_stop_id = rhs.m_stop_id;
856 m_threads.swap(rhs.m_threads);
858
859 // Now we look for threads that we are done with and make sure to clear
860 // them up as much as possible so anyone with a shared pointer will still
861 // have a reference, but the thread won't be of much use. Using
862 // std::weak_ptr for all backward references (such as a thread to a
863 // process) will eventually solve this issue for us, but for now, we need
864 // to work around the issue
865 collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
866 for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
867 // If this thread has already been destroyed, we don't need to look for
868 // it to destroy it again.
869 if (!(*rhs_pos)->IsValid())
870 continue;
871
872 const lldb::tid_t tid = (*rhs_pos)->GetID();
873 bool thread_is_alive = false;
874 const uint32_t num_threads = m_threads.size();
875 for (uint32_t idx = 0; idx < num_threads; ++idx) {
876 ThreadSP backing_thread = m_threads[idx]->GetBackingThread();
877 if (m_threads[idx]->GetID() == tid ||
878 (backing_thread && backing_thread->GetID() == tid)) {
879 thread_is_alive = true;
880 break;
881 }
882 }
883 if (!thread_is_alive) {
884 (*rhs_pos)->DestroyThread();
885 }
886 }
887 }
888}
889
891 std::lock_guard<std::recursive_mutex> guard(GetMutex());
892 collection::iterator pos, end = m_threads.end();
893 for (pos = m_threads.begin(); pos != end; ++pos)
894 (*pos)->Flush();
895}
896
// All ThreadList instances for a given process share the process-owned mutex.
// This is what makes cross-list operations (operator=, Update) safe while
// locking only one side.
std::recursive_mutex &ThreadList::GetMutex() const {
  return m_process.m_thread_mutex;
}
900
902 lldb::ThreadSP thread_sp)
904 if (thread_sp) {
905 m_tid = thread_sp->GetID();
906 m_thread_list = &thread_sp->GetProcess()->GetThreadList();
907 m_thread_list->PushExpressionExecutionThread(m_tid);
908 }
909}
910
912 tid_t tid) {
913 std::lock_guard<std::recursive_mutex> guard(GetMutex());
914 m_threads_stepping_over_bp[breakpoint_addr].insert(tid);
915
916 Log *log = GetLog(LLDBLog::Step);
917 LLDB_LOGF(
918 log,
919 "ThreadList::%s: Registered thread 0x%" PRIx64
920 " stepping over breakpoint at 0x%" PRIx64 " (now %zu threads)",
921 __FUNCTION__, tid, breakpoint_addr,
922 static_cast<size_t>(m_threads_stepping_over_bp[breakpoint_addr].size()));
923}
924
926 tid_t tid) {
927 std::lock_guard<std::recursive_mutex> guard(GetMutex());
928
929 Log *log = GetLog(LLDBLog::Step);
930
931 auto it = m_threads_stepping_over_bp.find(breakpoint_addr);
932 if (it == m_threads_stepping_over_bp.end()) {
933 // No threads registered for this breakpoint, re-enable directly.
934 LLDB_LOGF(log,
935 "ThreadList::%s: Thread 0x%" PRIx64
936 " finished stepping over breakpoint at 0x%" PRIx64
937 " but no threads were registered, re-enabling directly",
938 __FUNCTION__, tid, breakpoint_addr);
939 if (BreakpointSiteSP bp_site_sp =
940 m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr))
941 m_process.EnableBreakpointSite(bp_site_sp.get());
942 return;
943 }
944
945 // Remove this thread from the set.
946 it->second.erase(tid);
947
948 LLDB_LOGF(log,
949 "ThreadList::%s: Thread 0x%" PRIx64
950 " finished stepping over breakpoint at 0x%" PRIx64
951 " (%zu threads remaining)",
952 __FUNCTION__, tid, breakpoint_addr,
953 static_cast<size_t>(it->second.size()));
954
955 // If no more threads are stepping over this breakpoint, re-enable it.
956 if (it->second.empty()) {
957 LLDB_LOGF(log,
958 "ThreadList::%s: All threads finished stepping over breakpoint "
959 "at 0x%" PRIx64 ", re-enabling breakpoint",
960 __FUNCTION__, breakpoint_addr);
961
962 if (BreakpointSiteSP bp_site_sp =
963 m_process.GetBreakpointSiteList().FindByAddress(breakpoint_addr))
964 m_process.EnableBreakpointSite(bp_site_sp.get());
965
966 // Clean up the entry.
968 }
969}
#define LLDB_LOG(log,...)
The LLDB_LOG* macros defined below are the way to emit log messages.
Definition Log.h:369
#define LLDB_LOGF_VERBOSE(log,...)
Definition Log.h:390
#define LLDB_LOGF(log,...)
Definition Log.h:383
void PutCString(const char *cstr)
Definition Log.cpp:145
static bool GetInterruptedFromEvent(const Event *event_ptr)
Definition Process.cpp:4580
std::vector< lldb::ThreadSP > collection
void AddThread(const lldb::ThreadSP &thread_sp)
ExpressionExecutionThreadPusher(ThreadList &thread_list, lldb::tid_t tid)
Definition ThreadList.h:53
lldb::ThreadSP RemoveThreadByID(lldb::tid_t tid, bool can_update=true)
lldb::ThreadSP GetSelectedThread()
bool ShouldStop(Event *event_ptr)
Vote ShouldReportStop(Event *event_ptr)
uint32_t GetStopID() const
void ThreadFinishedSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, lldb::tid_t tid)
Called by ThreadPlanStepOverBreakpoint when a thread finishes stepping over a breakpoint.
bool SetSelectedThreadByIndexID(uint32_t index_id, bool notify=false)
lldb::ThreadSP FindThreadByProtocolID(lldb::tid_t tid, bool can_update=true)
uint32_t GetSize(bool can_update=true)
void PopExpressionExecutionThread(lldb::tid_t tid)
bool WillResume(lldb::RunDirection &direction)
The thread list asks tells all the threads it is about to resume.
bool SetSelectedThreadByID(lldb::tid_t tid, bool notify=false)
lldb::ThreadSP FindThreadByIndexID(uint32_t index_id, bool can_update=true)
Vote ShouldReportRun(Event *event_ptr)
void SetStopID(uint32_t stop_id)
lldb::ThreadSP GetThreadSPForThreadPtr(Thread *thread_ptr)
lldb::ThreadSP GetThreadAtIndex(uint32_t idx, bool can_update=true)
uint32_t m_stop_id
The process stop ID that this thread list is valid for.
Definition ThreadList.h:168
std::recursive_mutex & GetMutex() const override
lldb::ThreadSP FindThreadByID(lldb::tid_t tid, bool can_update=true)
lldb::ThreadSP GetExpressionExecutionThread()
const ThreadList & operator=(const ThreadList &rhs)
Precondition: both thread lists must be belong to the same process.
void RegisterThreadSteppingOverBreakpoint(lldb::addr_t breakpoint_addr, lldb::tid_t tid)
Register a thread that is about to step over a breakpoint.
void Update(ThreadList &rhs)
Precondition: both thread lists must be belong to the same process.
void PushExpressionExecutionThread(lldb::tid_t tid)
void SetShouldReportStop(Vote vote)
lldb::tid_t m_selected_tid
For targets that need the notion of a current thread.
Definition ThreadList.h:170
void NotifySelectedThreadChanged(lldb::tid_t tid)
ThreadList(Process &process)
lldb::ThreadSP RemoveThreadByProtocolID(lldb::tid_t tid, bool can_update=true)
llvm::DenseMap< lldb::addr_t, llvm::DenseSet< lldb::tid_t > > m_threads_stepping_over_bp
Tracks which threads are currently stepping over each breakpoint address.
Definition ThreadList.h:178
Process & m_process
The process that manages this thread list.
Definition ThreadList.h:166
std::vector< lldb::tid_t > m_expression_tid_stack
Definition ThreadList.h:171
void SetDeferReenableBreakpointSite(bool defer)
When set to true, the breakpoint site will NOT be re-enabled directly by this plan.
ThreadPlanKind GetKind() const
Definition ThreadPlan.h:446
@ eBroadcastBitThreadSelected
Definition Thread.h:80
#define LLDB_INVALID_THREAD_ID
A class that represents a running process on the host machine.
Log * GetLog(Cat mask)
Retrieve the Log object for the channel associated with the given log enum.
Definition Log.h:332
bool StateIsRunningState(lldb::StateType state)
Check if a state represents a state where the process or thread is running.
Definition State.cpp:68
std::shared_ptr< lldb_private::BreakpointSite > BreakpointSiteSP
RunDirection
Execution directions.
std::shared_ptr< lldb_private::Thread > ThreadSP
StateType
Process and Thread States.
@ eStateSuspended
Process or thread is in a suspended state as far as the debugger is concerned while other processes o...
uint64_t addr_t
Definition lldb-types.h:80
uint64_t tid_t
Definition lldb-types.h:84