1 /*
2 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
3 * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
4 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
5 * Distributed under the terms of the MIT License.
6 *
7 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
8 * Distributed under the terms of the NewOS License.
9 */
10
11
12 /*! Threading routines */
13
14
15 #include <thread.h>
16
17 #include <errno.h>
18 #include <malloc.h>
19 #include <stdio.h>
20 #include <stdlib.h>
21 #include <string.h>
22 #include <sys/resource.h>
23
24 #include <algorithm>
25
26 #include <OS.h>
27
28 #include <util/AutoLock.h>
29 #include <util/ThreadAutoLock.h>
30
31 #include <arch/debug.h>
32 #include <boot/kernel_args.h>
33 #include <condition_variable.h>
34 #include <cpu.h>
35 #include <int.h>
36 #include <kimage.h>
37 #include <kscheduler.h>
38 #include <ksignal.h>
39 #include <Notifications.h>
40 #include <real_time_clock.h>
41 #include <slab/Slab.h>
42 #include <smp.h>
43 #include <syscalls.h>
44 #include <syscall_restart.h>
45 #include <team.h>
46 #include <tls.h>
47 #include <user_runtime.h>
48 #include <user_thread.h>
49 #include <vfs.h>
50 #include <vm/vm.h>
51 #include <vm/VMAddressSpace.h>
52 #include <wait_for_objects.h>
53
54 #include "TeamThreadTables.h"
55
56
57 //#define TRACE_THREAD
58 #ifdef TRACE_THREAD
59 # define TRACE(x) dprintf x
60 #else
61 # define TRACE(x) ;
62 #endif
63
64
65 #define THREAD_MAX_MESSAGE_SIZE 65536
66
67
68 // #pragma mark - ThreadHashTable
69
70
71 typedef BKernel::TeamThreadTable<Thread> ThreadHashTable;
72
73
74 // thread list
75 static Thread sIdleThreads[SMP_MAX_CPUS];
76 static ThreadHashTable sThreadHash;
77 static rw_spinlock sThreadHashLock = B_RW_SPINLOCK_INITIALIZER;
78 static thread_id sNextThreadID = 2;
79 // ID 1 is allocated for the kernel by Team::Team() behind our back
80
81 // some arbitrarily chosen limits -- should probably depend on the available
82 // memory
83 static int32 sMaxThreads = 4096;
84 static int32 sUsedThreads = 0;
85
86 spinlock gThreadCreationLock = B_SPINLOCK_INITIALIZER;
87
88
89 struct UndertakerEntry : DoublyLinkedListLinkImpl<UndertakerEntry> {
90 Thread* thread;
91 team_id teamID;
92
93 UndertakerEntry(Thread* thread, team_id teamID)
94 :
95 thread(thread),
96 teamID(teamID)
97 {
98 }
99 };
100
101
102 struct ThreadEntryArguments {
103 status_t (*kernelFunction)(void* argument);
104 void* argument;
105 bool enterUserland;
106 };
107
108 struct UserThreadEntryArguments : ThreadEntryArguments {
109 addr_t userlandEntry;
110 void* userlandArgument1;
111 void* userlandArgument2;
112 pthread_t pthread;
113 arch_fork_arg* forkArgs;
114 uint32 flags;
115 };
116
117
118 class ThreadNotificationService : public DefaultNotificationService {
119 public:
120 ThreadNotificationService()
121 : DefaultNotificationService("threads")
122 {
123 }
124
125 void Notify(uint32 eventCode, team_id teamID, thread_id threadID,
126 Thread* thread = NULL)
127 {
128 char eventBuffer[180];
129 KMessage event;
130 event.SetTo(eventBuffer, sizeof(eventBuffer), THREAD_MONITOR);
131 event.AddInt32("event", eventCode);
132 event.AddInt32("team", teamID);
133 event.AddInt32("thread", threadID);
134 if (thread != NULL)
135 event.AddPointer("threadStruct", thread);
136
137 DefaultNotificationService::Notify(event, eventCode);
138 }
139
140 void Notify(uint32 eventCode, Thread* thread)
141 {
142 return Notify(eventCode, thread->team->id, thread->id, thread);
143 }
144 };
145
146
147 static DoublyLinkedList<UndertakerEntry> sUndertakerEntries;
148 static spinlock sUndertakerLock = B_SPINLOCK_INITIALIZER;
149 static ConditionVariable sUndertakerCondition;
150 static ThreadNotificationService sNotificationService;
151
152
153 // object cache to allocate thread structures from
154 static object_cache* sThreadCache;
155
156
157 // #pragma mark - Thread
158
159
160 /*! Constructs a thread.
161
162 \param name The thread's name.
163 \param threadID The ID to be assigned to the new thread. If
164 \code < 0 \endcode a fresh one is allocated.
165 \param cpu The CPU the thread shall be assigned to.
166 */
167 Thread::Thread(const char* name, thread_id threadID, struct cpu_ent* cpu)
168 :
169 flags(0),
170 serial_number(-1),
171 hash_next(NULL),
172 team_next(NULL),
173 priority(-1),
174 io_priority(-1),
175 cpu(cpu),
176 previous_cpu(NULL),
177 cpumask(),
178 pinned_to_cpu(0),
179 sig_block_mask(0),
180 sigsuspend_original_unblocked_mask(0),
181 user_signal_context(NULL),
182 signal_stack_base(0),
183 signal_stack_size(0),
184 signal_stack_enabled(false),
185 in_kernel(true),
186 has_yielded(false),
187 user_thread(NULL),
188 fault_handler(0),
189 page_faults_allowed(1),
190 team(NULL),
191 select_infos(NULL),
192 kernel_stack_area(-1),
193 kernel_stack_base(0),
194 user_stack_area(-1),
195 user_stack_base(0),
196 user_local_storage(0),
197 kernel_errno(0),
198 user_time(0),
199 kernel_time(0),
200 last_time(0),
201 cpu_clock_offset(0),
202 post_interrupt_callback(NULL),
203 post_interrupt_data(NULL)
204 {
205 id = threadID >= 0 ? threadID : allocate_thread_id();
206 visible = false;
207
208 // init locks
209 char lockName[32];
210 snprintf(lockName, sizeof(lockName), "Thread:%" B_PRId32, id);
211 mutex_init_etc(&fLock, lockName, MUTEX_FLAG_CLONE_NAME);
212
213 B_INITIALIZE_SPINLOCK(&time_lock);
214 B_INITIALIZE_SPINLOCK(&scheduler_lock);
215 B_INITIALIZE_RW_SPINLOCK(&team_lock);
216
217 // init name
218 if (name != NULL)
219 strlcpy(this->name, name, B_OS_NAME_LENGTH);
220 else
221 strcpy(this->name, "unnamed thread");
222
223 exit.status = 0;
224
225 list_init(&exit.waiters);
226
227 exit.sem = -1;
228 msg.write_sem = -1;
229 msg.read_sem = -1;
230
231 // add to thread table -- yet invisible
232 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
233 sThreadHash.Insert(this);
234 }
235
236
237 Thread::~Thread()
238 {
239 // Delete resources that should actually be deleted by the thread itself
240 // when it exits, but that might still exist if the thread was never run.
241
242 if (user_stack_area >= 0)
243 delete_area(user_stack_area);
244
245 DeleteUserTimers(false);
246
247 // delete the resources that may remain in either case
248
249 if (kernel_stack_area >= 0)
250 delete_area(kernel_stack_area);
251
252 fPendingSignals.Clear();
253
254 if (exit.sem >= 0)
255 delete_sem(exit.sem);
256 if (msg.write_sem >= 0)
257 delete_sem(msg.write_sem);
258 if (msg.read_sem >= 0)
259 delete_sem(msg.read_sem);
260
261 scheduler_on_thread_destroy(this);
262
263 mutex_destroy(&fLock);
264
265 // remove from thread table
266 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
267 sThreadHash.Remove(this);
268 }
269
270
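/*! Creates and initializes a new Thread object.

	The object is not yet assigned to a team and has no kernel or user stack
	yet; thread_create_thread() takes care of that. On success the caller owns
	the initial reference to the returned object.

	\param name The thread's name.
	\param _thread Set to the newly created Thread object on success.
	\return \c B_OK on success, another error code otherwise.
*/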
271 /*static*/ status_t
272 Thread::Create(const char* name, Thread*& _thread)
273 {
274 Thread* thread = new Thread(name, -1, NULL);
275 if (thread == NULL)
276 return B_NO_MEMORY;
277
278 status_t error = thread->Init(false);
279 if (error != B_OK) {
280 delete thread;
281 return error;
282 }
283
284 _thread = thread;
285 return B_OK;
286 }
287
288
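/*! Looks up the thread with ID \a id and acquires a reference to it.

	A minimal usage sketch (illustrative only), following the pattern used
	elsewhere in this file:
	\code
	Thread* thread = Thread::Get(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	BReference<Thread> threadReference(thread, true);
		// the reference is released automatically when it goes out of scope
	\endcode

	\param id The ID of the thread to look up.
	\return The thread with a reference acquired, or \c NULL, if there is no
		thread with the given ID.
*/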
289 /*static*/ Thread*
290 Thread::Get(thread_id id)
291 {
292 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
293 Thread* thread = sThreadHash.Lookup(id);
294 if (thread != NULL)
295 thread->AcquireReference();
296 return thread;
297 }
298
299
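/*! Looks up the thread with ID \a id, acquires a reference to it, and locks
	it.

	A minimal usage sketch (illustrative only):
	\code
	Thread* thread = Thread::GetAndLock(id);
	if (thread == NULL)
		return B_BAD_THREAD_ID;
	// ... work with the locked thread ...
	thread->UnlockAndReleaseReference();
	\endcode

	\param id The ID of the thread to look up.
	\return The thread, locked and with a reference acquired, or \c NULL, if
		there is no thread with the given ID (anymore).
*/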
300 /*static*/ Thread*
301 Thread::GetAndLock(thread_id id)
302 {
303 // look it up and acquire a reference
304 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
305 Thread* thread = sThreadHash.Lookup(id);
306 if (thread == NULL)
307 return NULL;
308
309 thread->AcquireReference();
310 threadHashLocker.Unlock();
311
312 // lock and check whether it is still in the hash table
313 thread->Lock();
314 threadHashLocker.Lock();
315
316 if (sThreadHash.Lookup(id) == thread)
317 return thread;
318
319 threadHashLocker.Unlock();
320
321 // nope, the thread is no longer in the hash table
322 thread->UnlockAndReleaseReference();
323
324 return NULL;
325 }
326
327
328 /*static*/ Thread*
329 Thread::GetDebug(thread_id id)
330 {
331 return sThreadHash.Lookup(id, false);
332 }
333
334
335 /*static*/ bool
336 Thread::IsAlive(thread_id id)
337 {
338 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
339 return sThreadHash.Lookup(id) != NULL;
340 }
341
342
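// Thread objects are not allocated from the general kernel heap: the plain
// operator new below takes the memory from the dedicated object cache
// (sThreadCache), and the matching operator delete returns it there. The
// placement new variant is used when a Thread needs to be constructed in
// storage provided by the caller.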
343 void*
344 Thread::operator new(size_t size)
345 {
346 return object_cache_alloc(sThreadCache, 0);
347 }
348
349
350 void*
351 Thread::operator new(size_t, void* pointer)
352 {
353 return pointer;
354 }
355
356
357 void
358 Thread::operator delete(void* pointer, size_t size)
359 {
360 object_cache_free(sThreadCache, pointer, 0);
361 }
362
363
364 status_t
365 Thread::Init(bool idleThread)
366 {
367 status_t error = scheduler_on_thread_create(this, idleThread);
368 if (error != B_OK)
369 return error;
370
371 char temp[64];
372 snprintf(temp, sizeof(temp), "thread_%" B_PRId32 "_retcode_sem", id);
373 exit.sem = create_sem(0, temp);
374 if (exit.sem < 0)
375 return exit.sem;
376
377 snprintf(temp, sizeof(temp), "%s send", name);
378 msg.write_sem = create_sem(1, temp);
379 if (msg.write_sem < 0)
380 return msg.write_sem;
381
382 snprintf(temp, sizeof(temp), "%s receive", name);
383 msg.read_sem = create_sem(0, temp);
384 if (msg.read_sem < 0)
385 return msg.read_sem;
386
387 error = arch_thread_init_thread_struct(this);
388 if (error != B_OK)
389 return error;
390
391 return B_OK;
392 }
393
394
395 /*! Checks whether the thread is still in the thread hash table.
396 */
397 bool
398 Thread::IsAlive() const
399 {
400 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
401
402 return sThreadHash.Lookup(id) != NULL;
403 }
404
405
406 void
407 Thread::ResetSignalsOnExec()
408 {
409 // We are supposed to keep the pending signals and the signal mask. Only the
410 // signal stack, if set, shall be unset.
411
412 sigsuspend_original_unblocked_mask = 0;
413 user_signal_context = NULL;
414 signal_stack_base = 0;
415 signal_stack_size = 0;
416 signal_stack_enabled = false;
417 }
418
419
420 /*! Adds the given user timer to the thread and, if user-defined, assigns it an
421 ID.
422
423 The caller must hold the thread's lock.
424
425 \param timer The timer to be added. If it doesn't have an ID yet, it is
426 considered user-defined and will be assigned an ID.
427 \return \c B_OK, if the timer was added successfully, another error code
428 otherwise.
429 */
430 status_t
431 Thread::AddUserTimer(UserTimer* timer)
432 {
433 // If the timer is user-defined, check timer limit and increment
434 // user-defined count.
435 if (timer->ID() < 0 && !team->CheckAddUserDefinedTimer())
436 return EAGAIN;
437
438 fUserTimers.AddTimer(timer);
439
440 return B_OK;
441 }
442
443
444 /*! Removes the given user timer from the thread.
445
446 The caller must hold the thread's lock.
447
448 \param timer The timer to be removed.
449
450 */
451 void
452 Thread::RemoveUserTimer(UserTimer* timer)
453 {
454 fUserTimers.RemoveTimer(timer);
455
456 if (timer->ID() >= USER_TIMER_FIRST_USER_DEFINED_ID)
457 team->UserDefinedTimersRemoved(1);
458 }
459
460
461 /*! Deletes all (or all user-defined) user timers of the thread.
462
463 The caller must hold the thread's lock.
464
465 \param userDefinedOnly If \c true, only the user-defined timers are deleted,
466 otherwise all timers are deleted.
467 */
468 void
469 Thread::DeleteUserTimers(bool userDefinedOnly)
470 {
471 int32 count = fUserTimers.DeleteTimers(userDefinedOnly);
472 if (count > 0)
473 team->UserDefinedTimersRemoved(count);
474 }
475
476
477 void
478 Thread::DeactivateCPUTimeUserTimers()
479 {
480 while (ThreadTimeUserTimer* timer = fCPUTimeUserTimers.Head())
481 timer->Deactivate();
482 }
483
484
485 // #pragma mark - ThreadListIterator
486
487
488 ThreadListIterator::ThreadListIterator()
489 {
490 // queue the entry
491 InterruptsWriteSpinLocker locker(sThreadHashLock);
492 sThreadHash.InsertIteratorEntry(&fEntry);
493 }
494
495
496 ThreadListIterator::~ThreadListIterator()
497 {
498 // remove the entry
499 InterruptsWriteSpinLocker locker(sThreadHashLock);
500 sThreadHash.RemoveIteratorEntry(&fEntry);
501 }
502
503
504 Thread*
505 ThreadListIterator::Next()
506 {
507 // get the next thread -- if there is one, acquire a reference to it
508 InterruptsWriteSpinLocker locker(sThreadHashLock);
509 Thread* thread = sThreadHash.NextElement(&fEntry);
510 if (thread != NULL)
511 thread->AcquireReference();
512
513 return thread;
514 }
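
// A minimal usage sketch for ThreadListIterator (illustrative only). Each
// thread returned by Next() comes with an acquired reference that the caller
// is responsible for releasing:
//
//	ThreadListIterator iterator;
//	while (Thread* thread = iterator.Next()) {
//		// ... inspect the thread ...
//		thread->ReleaseReference();
//	}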
515
516
517 // #pragma mark - ThreadCreationAttributes
518
519
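/*! Initializes the attributes for creating a kernel thread.

	\param function The function the new thread shall execute.
	\param name The thread's name.
	\param priority The thread's priority.
	\param arg The argument passed to \a function.
	\param team The team in which the thread shall be created. If \c < 0, the
		kernel team is used.
	\param thread An optional, already allocated Thread object to be used for
		the new thread instead of creating a new one.
*/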
520 ThreadCreationAttributes::ThreadCreationAttributes(thread_func function,
521 const char* name, int32 priority, void* arg, team_id team,
522 Thread* thread)
523 {
524 this->entry = NULL;
525 this->name = name;
526 this->priority = priority;
527 this->args1 = NULL;
528 this->args2 = NULL;
529 this->stack_address = NULL;
530 this->stack_size = 0;
531 this->guard_size = 0;
532 this->pthread = NULL;
533 this->flags = 0;
534 this->team = team >= 0 ? team : team_get_kernel_team()->id;
535 this->thread = thread;
536 this->signal_mask = 0;
537 this->additional_stack_size = 0;
538 this->kernelEntry = function;
539 this->kernelArgument = arg;
540 this->forkArgs = NULL;
541 }
542
543
544 /*! Initializes the structure from a userland structure.
545 \param userAttributes The userland structure (must be a userland address).
546 \param nameBuffer A character array of at least size B_OS_NAME_LENGTH,
547 which will be used for the \c name field, if the userland structure has
548 a name. The buffer must remain valid as long as this structure is in
549 use afterwards (or until it is reinitialized).
550 \return \c B_OK, if the initialization went fine, another error code
551 otherwise.
552 */
553 status_t
554 ThreadCreationAttributes::InitFromUserAttributes(
555 const thread_creation_attributes* userAttributes, char* nameBuffer)
556 {
557 if (userAttributes == NULL || !IS_USER_ADDRESS(userAttributes)
558 || user_memcpy((thread_creation_attributes*)this, userAttributes,
559 sizeof(thread_creation_attributes)) != B_OK) {
560 return B_BAD_ADDRESS;
561 }
562
563 if (stack_size != 0
564 && (stack_size < MIN_USER_STACK_SIZE
565 || stack_size > MAX_USER_STACK_SIZE)) {
566 return B_BAD_VALUE;
567 }
568
569 if (entry == NULL || !IS_USER_ADDRESS(entry)
570 || (stack_address != NULL && !IS_USER_ADDRESS(stack_address))
571 || (name != NULL && (!IS_USER_ADDRESS(name)
572 || user_strlcpy(nameBuffer, name, B_OS_NAME_LENGTH) < 0))) {
573 return B_BAD_ADDRESS;
574 }
575
576 name = name != NULL ? nameBuffer : "user thread";
577
578 // kernel only attributes (not in thread_creation_attributes):
579 Thread* currentThread = thread_get_current_thread();
580 team = currentThread->team->id;
581 thread = NULL;
582 signal_mask = currentThread->sig_block_mask;
583 // inherit the current thread's signal mask
584 additional_stack_size = 0;
585 kernelEntry = NULL;
586 kernelArgument = NULL;
587 forkArgs = NULL;
588
589 return B_OK;
590 }
591
592
593 // #pragma mark - private functions
594
595
596 /*! Inserts a thread into a team.
597 The caller must hold the team's lock, the thread's lock, and the scheduler
598 lock.
599 */
600 static void
601 insert_thread_into_team(Team *team, Thread *thread)
602 {
603 thread->team_next = team->thread_list;
604 team->thread_list = thread;
605 team->num_threads++;
606
607 if (team->num_threads == 1) {
608 // this was the first thread
609 team->main_thread = thread;
610 }
611 thread->team = team;
612 }
613
614
615 /*! Removes a thread from a team.
616 The caller must hold the team's lock, the thread's lock, and the scheduler
617 lock.
618 */
619 static void
620 remove_thread_from_team(Team *team, Thread *thread)
621 {
622 Thread *temp, *last = NULL;
623
624 for (temp = team->thread_list; temp != NULL; temp = temp->team_next) {
625 if (temp == thread) {
626 if (last == NULL)
627 team->thread_list = temp->team_next;
628 else
629 last->team_next = temp->team_next;
630
631 team->num_threads--;
632 break;
633 }
634 last = temp;
635 }
636 }
637
638
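/*! Finishes the userland setup of the given thread and jumps to userland.

	Initializes the thread's TLS area and its user_thread structure. For a
	fork()ed thread the saved fork frame is restored instead of calling the
	userland entry function. On success this function does not return; it only
	returns if something goes wrong.
*/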
639 static status_t
640 enter_userspace(Thread* thread, UserThreadEntryArguments* args)
641 {
642 status_t error = arch_thread_init_tls(thread);
643 if (error != B_OK) {
644 dprintf("Failed to init TLS for new userland thread \"%s\" (%" B_PRId32
645 ")\n", thread->name, thread->id);
646 free(args->forkArgs);
647 return error;
648 }
649
650 user_debug_update_new_thread_flags(thread);
651
652 // init the thread's user_thread
653 user_thread* userThread = thread->user_thread;
654 arch_cpu_enable_user_access();
655 userThread->pthread = args->pthread;
656 userThread->flags = 0;
657 userThread->wait_status = B_OK;
658 userThread->defer_signals
659 = (args->flags & THREAD_CREATION_FLAG_DEFER_SIGNALS) != 0 ? 1 : 0;
660 userThread->pending_signals = 0;
661 arch_cpu_disable_user_access();
662
663 // initialize default TLS fields
664 addr_t tls[TLS_FIRST_FREE_SLOT];
665 memset(tls, 0, sizeof(tls));
666 tls[TLS_BASE_ADDRESS_SLOT] = thread->user_local_storage;
667 tls[TLS_THREAD_ID_SLOT] = thread->id;
668 tls[TLS_USER_THREAD_SLOT] = (addr_t)thread->user_thread;
669
670 if (args->forkArgs == NULL) {
671 if (user_memcpy((void*)thread->user_local_storage, tls, sizeof(tls)) != B_OK)
672 return B_BAD_ADDRESS;
673 } else {
674 // This is a fork()ed thread.
675
676 // Update selected TLS values, do not clear the whole array.
677 arch_cpu_enable_user_access();
678 addr_t* userTls = (addr_t*)thread->user_local_storage;
679 ASSERT(userTls[TLS_BASE_ADDRESS_SLOT] == thread->user_local_storage);
680 userTls[TLS_THREAD_ID_SLOT] = tls[TLS_THREAD_ID_SLOT];
681 userTls[TLS_USER_THREAD_SLOT] = tls[TLS_USER_THREAD_SLOT];
682 arch_cpu_disable_user_access();
683
684 // Copy the fork args onto the stack and free them.
685 arch_fork_arg archArgs = *args->forkArgs;
686 free(args->forkArgs);
687
688 arch_restore_fork_frame(&archArgs);
689 // this one won't return here
690 return B_ERROR;
691 }
692
693 // Jump to the entry point in user space. Only returns if something fails.
694 return arch_thread_enter_userspace(thread, args->userlandEntry,
695 args->userlandArgument1, args->userlandArgument2);
696 }
697
698
699 status_t
700 thread_enter_userspace_new_team(Thread* thread, addr_t entryFunction,
701 void* argument1, void* argument2)
702 {
703 UserThreadEntryArguments entryArgs;
704 entryArgs.kernelFunction = NULL;
705 entryArgs.argument = NULL;
706 entryArgs.enterUserland = true;
707 entryArgs.userlandEntry = (addr_t)entryFunction;
708 entryArgs.userlandArgument1 = argument1;
709 entryArgs.userlandArgument2 = argument2;
710 entryArgs.pthread = NULL;
711 entryArgs.forkArgs = NULL;
712 entryArgs.flags = 0;
713
714 return enter_userspace(thread, &entryArgs);
715 }
716
717
718 static void
719 common_thread_entry(void* _args)
720 {
721 Thread* thread = thread_get_current_thread();
722
723 // The thread is new and has been scheduled for the first time.
724
725 scheduler_new_thread_entry(thread);
726
727 // unlock the scheduler lock and enable interrupts
728 release_spinlock(&thread->scheduler_lock);
729 enable_interrupts();
730
731 // call the kernel function, if any
732 ThreadEntryArguments* args = (ThreadEntryArguments*)_args;
733 if (args->kernelFunction != NULL)
734 args->kernelFunction(args->argument);
735
736 // If requested, enter userland, now.
737 if (args->enterUserland) {
738 enter_userspace(thread, (UserThreadEntryArguments*)args);
739 // only returns on error
740
741 // If that's the team's main thread, init the team exit info.
742 if (thread == thread->team->main_thread)
743 team_init_exit_info_on_error(thread->team);
744 }
745
746 // we're done
747 thread_exit();
748 }
749
750
751 /*! Prepares the given thread's kernel stack for executing its entry function.
752
753 The data pointed to by \a data of size \a dataSize are copied to the
754 thread's kernel stack. A pointer to the copy's data is passed to the entry
755 function. The entry function is common_thread_entry().
756
757 \param thread The thread.
758 \param data Pointer to data to be copied to the thread's stack and passed
759 to the entry function.
760 \param dataSize The size of \a data.
761 */
762 static void
763 init_thread_kernel_stack(Thread* thread, const void* data, size_t dataSize)
764 {
765 uint8* stack = (uint8*)thread->kernel_stack_base;
766 uint8* stackTop = (uint8*)thread->kernel_stack_top;
767
768 // clear (or rather invalidate) the kernel stack contents, if compiled with
769 // debugging
770 #if KDEBUG > 0
771 # if defined(DEBUG_KERNEL_STACKS) && defined(STACK_GROWS_DOWNWARDS)
772 memset((void*)(stack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0xcc,
773 KERNEL_STACK_SIZE);
774 # else
775 memset(stack, 0xcc, KERNEL_STACK_SIZE);
776 # endif
777 #endif
778
779 // copy the data onto the stack, with 16-byte alignment to be on the safe
780 // side
781 void* clonedData;
782 #ifdef STACK_GROWS_DOWNWARDS
783 clonedData = (void*)ROUNDDOWN((addr_t)stackTop - dataSize, 16);
784 stackTop = (uint8*)clonedData;
785 #else
786 clonedData = (void*)ROUNDUP((addr_t)stack, 16);
787 stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
788 #endif
789
790 memcpy(clonedData, data, dataSize);
791
792 arch_thread_init_kthread_stack(thread, stack, stackTop,
793 &common_thread_entry, clonedData);
794 }
795
796
797 static status_t
798 create_thread_user_stack(Team* team, Thread* thread, void* _stackBase,
799 size_t stackSize, size_t additionalSize, size_t guardSize,
800 char* nameBuffer)
801 {
802 area_id stackArea = -1;
803 uint8* stackBase = (uint8*)_stackBase;
804
805 if (stackBase != NULL) {
806 // A stack has been specified. It must be large enough to hold the
807 // TLS space at least. Guard pages are ignored for existing stacks.
808 STATIC_ASSERT(TLS_SIZE < MIN_USER_STACK_SIZE);
809 if (stackSize < MIN_USER_STACK_SIZE)
810 return B_BAD_VALUE;
811
812 stackSize -= TLS_SIZE;
813 } else {
814 // No user-defined stack -- allocate one. For non-main threads the stack
815 // will be between USER_STACK_REGION and the main thread stack area. For
816 // a main thread the position is fixed.
817
818 guardSize = PAGE_ALIGN(guardSize);
819
820 if (stackSize == 0) {
821 // Use the default size (a different one for a main thread).
822 stackSize = thread->id == team->id
823 ? USER_MAIN_THREAD_STACK_SIZE : USER_STACK_SIZE;
824 } else {
825 // Verify that the given stack size is large enough.
826 if (stackSize < MIN_USER_STACK_SIZE)
827 return B_BAD_VALUE;
828
829 stackSize = PAGE_ALIGN(stackSize);
830 }
831
832 size_t areaSize = PAGE_ALIGN(guardSize + stackSize + TLS_SIZE
833 + additionalSize);
834
835 snprintf(nameBuffer, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_stack",
836 thread->name, thread->id);
837
838 stackBase = (uint8*)USER_STACK_REGION;
839
840 virtual_address_restrictions virtualRestrictions = {};
841 virtualRestrictions.address_specification = B_RANDOMIZED_BASE_ADDRESS;
842 virtualRestrictions.address = (void*)stackBase;
843
844 physical_address_restrictions physicalRestrictions = {};
845
846 stackArea = create_area_etc(team->id, nameBuffer,
847 areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA | B_STACK_AREA,
848 0, guardSize, &virtualRestrictions, &physicalRestrictions,
849 (void**)&stackBase);
850 if (stackArea < 0)
851 return stackArea;
852 }
853
854 // set the stack
855 ThreadLocker threadLocker(thread);
856 #ifdef STACK_GROWS_DOWNWARDS
857 thread->user_stack_base = (addr_t)stackBase + guardSize;
858 #else
859 thread->user_stack_base = (addr_t)stackBase;
860 #endif
861 thread->user_stack_size = stackSize;
862 thread->user_stack_area = stackArea;
863
864 return B_OK;
865 }
866
867
868 status_t
869 thread_create_user_stack(Team* team, Thread* thread, void* stackBase,
870 size_t stackSize, size_t additionalSize)
871 {
872 char nameBuffer[B_OS_NAME_LENGTH];
873 return create_thread_user_stack(team, thread, stackBase, stackSize,
874 additionalSize, USER_STACK_GUARD_SIZE, nameBuffer);
875 }
876
877
878 /*! Creates a new thread.
879
880 \param attributes The thread creation attributes, specifying the team in
881 which to create the thread, as well as a whole bunch of other arguments.
882 \param kernel \c true, if a kernel-only thread shall be created, \c false,
883 if the thread shall also be able to run in userland.
884 \return The ID of the newly created thread (>= 0) or an error code on
885 failure.
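
	A minimal usage sketch for a kernel-only thread (illustrative only; the
	worker function is made up):
	\code
	static status_t
	my_worker(void* data)
	{
		// ... do the actual work ...
		return B_OK;
	}

	ThreadCreationAttributes attributes(&my_worker, "my worker",
		B_NORMAL_PRIORITY, NULL, -1, NULL);
	thread_id thread = thread_create_thread(attributes, true);
	if (thread >= 0)
		resume_thread(thread);
	\endcode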
886 */
887 thread_id
888 thread_create_thread(const ThreadCreationAttributes& attributes, bool kernel)
889 {
890 status_t status = B_OK;
891
892 TRACE(("thread_create_thread(%s, thread = %p, %s)\n", attributes.name,
893 attributes.thread, kernel ? "kernel" : "user"));
894
895 // get the team
896 Team* team = Team::Get(attributes.team);
897 if (team == NULL)
898 return B_BAD_TEAM_ID;
899 BReference<Team> teamReference(team, true);
900
901 // If a thread object is given, acquire a reference to it, otherwise create
902 // a new thread object with the given attributes.
903 Thread* thread = attributes.thread;
904 if (thread != NULL) {
905 thread->AcquireReference();
906 } else {
907 status = Thread::Create(attributes.name, thread);
908 if (status != B_OK)
909 return status;
910 }
911 BReference<Thread> threadReference(thread, true);
912
913 thread->team = team;
914 // set already, so that, if something goes wrong, the team pointer is
915 // available for deinitialization
916 thread->priority = attributes.priority == -1
917 ? B_NORMAL_PRIORITY : attributes.priority;
918 thread->priority = std::max(thread->priority,
919 (int32)THREAD_MIN_SET_PRIORITY);
920 thread->priority = std::min(thread->priority,
921 (int32)THREAD_MAX_SET_PRIORITY);
922 thread->state = B_THREAD_SUSPENDED;
923
924 thread->sig_block_mask = attributes.signal_mask;
925
926 // init debug structure
927 init_thread_debug_info(&thread->debug_info);
928
929 // create the kernel stack
930 char stackName[B_OS_NAME_LENGTH];
931 snprintf(stackName, B_OS_NAME_LENGTH, "%s_%" B_PRId32 "_kstack",
932 thread->name, thread->id);
933 virtual_address_restrictions virtualRestrictions = {};
934 virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
935 physical_address_restrictions physicalRestrictions = {};
936
937 thread->kernel_stack_area = create_area_etc(B_SYSTEM_TEAM, stackName,
938 KERNEL_STACK_SIZE + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
939 B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA
940 | B_KERNEL_STACK_AREA, 0, KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE,
941 &virtualRestrictions, &physicalRestrictions,
942 (void**)&thread->kernel_stack_base);
943
944 if (thread->kernel_stack_area < 0) {
945 // we're not yet part of a team, so we can just bail out
946 status = thread->kernel_stack_area;
947
948 dprintf("create_thread: error creating kernel stack: %s!\n",
949 strerror(status));
950
951 return status;
952 }
953
954 thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE
955 + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
956
957 if (kernel) {
958 // Init the thread's kernel stack. It will start executing
959 // common_thread_entry() with the arguments we prepare here.
960 ThreadEntryArguments entryArgs;
961 entryArgs.kernelFunction = attributes.kernelEntry;
962 entryArgs.argument = attributes.kernelArgument;
963 entryArgs.enterUserland = false;
964
965 init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
966 } else {
967 // create the userland stack, if the thread doesn't have one yet
968 if (thread->user_stack_base == 0) {
969 status = create_thread_user_stack(team, thread,
970 attributes.stack_address, attributes.stack_size,
971 attributes.additional_stack_size, attributes.guard_size,
972 stackName);
973 if (status != B_OK)
974 return status;
975 }
976
977 // Init the thread's kernel stack. It will start executing
978 // common_thread_entry() with the arguments we prepare here.
979 UserThreadEntryArguments entryArgs;
980 entryArgs.kernelFunction = attributes.kernelEntry;
981 entryArgs.argument = attributes.kernelArgument;
982 entryArgs.enterUserland = true;
983 entryArgs.userlandEntry = (addr_t)attributes.entry;
984 entryArgs.userlandArgument1 = attributes.args1;
985 entryArgs.userlandArgument2 = attributes.args2;
986 entryArgs.pthread = attributes.pthread;
987 entryArgs.forkArgs = attributes.forkArgs;
988 entryArgs.flags = attributes.flags;
989
990 init_thread_kernel_stack(thread, &entryArgs, sizeof(entryArgs));
991
992 // create the pre-defined thread timers
993 status = user_timer_create_thread_timers(team, thread);
994 if (status != B_OK)
995 return status;
996 }
997
998 // lock the team and see whether it is still alive
999 TeamLocker teamLocker(team);
1000 if (team->state >= TEAM_STATE_SHUTDOWN)
1001 return B_BAD_TEAM_ID;
1002
1003 bool debugNewThread = false;
1004 if (!kernel) {
1005 // allocate the user_thread structure, if not already allocated
1006 if (thread->user_thread == NULL) {
1007 thread->user_thread = team_allocate_user_thread(team);
1008 if (thread->user_thread == NULL)
1009 return B_NO_MEMORY;
1010 }
1011
1012 // If the new thread belongs to the same team as the current thread, it
1013 // may inherit some of the thread debug flags.
1014 Thread* currentThread = thread_get_current_thread();
1015 if (currentThread != NULL && currentThread->team == team) {
1016 // inherit all user flags...
1017 int32 debugFlags = atomic_get(&currentThread->debug_info.flags)
1018 & B_THREAD_DEBUG_USER_FLAG_MASK;
1019
1020 // ... save the syscall tracing flags, unless explicitly specified
1021 if (!(debugFlags & B_THREAD_DEBUG_SYSCALL_TRACE_CHILD_THREADS)) {
1022 debugFlags &= ~(B_THREAD_DEBUG_PRE_SYSCALL
1023 | B_THREAD_DEBUG_POST_SYSCALL);
1024 }
1025
1026 thread->debug_info.flags = debugFlags;
1027
1028 // stop the new thread, if desired
1029 debugNewThread = debugFlags & B_THREAD_DEBUG_STOP_CHILD_THREADS;
1030 }
1031 }
1032
1033 // We're going to make the thread live, now. The thread itself will take
1034 // over a reference to its Thread object. We'll acquire another reference
1035 // for our own use (and threadReference remains armed).
1036
1037 ThreadLocker threadLocker(thread);
1038
1039 InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
1040 WriteSpinLocker threadHashLocker(sThreadHashLock);
1041
1042 // check the thread limit
1043 if (sUsedThreads >= sMaxThreads) {
1044 // Clean up the user_thread structure. It's a bit unfortunate that the
1045 // Thread destructor cannot do that, so we have to do that explicitly.
1046 threadHashLocker.Unlock();
1047 threadCreationLocker.Unlock();
1048
1049 user_thread* userThread = thread->user_thread;
1050 thread->user_thread = NULL;
1051
1052 threadLocker.Unlock();
1053 teamLocker.Unlock();
1054
1055 if (userThread != NULL)
1056 team_free_user_thread(team, userThread);
1057
1058 return B_NO_MORE_THREADS;
1059 }
1060
1061 // make thread visible in global hash/list
1062 thread->visible = true;
1063 sUsedThreads++;
1064
1065 scheduler_on_thread_init(thread);
1066
1067 thread->AcquireReference();
1068
1069 // Debug the new thread, if the parent thread required that (see above),
1070 // or the respective global team debug flag is set. But only, if a
1071 // debugger is installed for the team.
1072 if (!kernel) {
1073 int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
1074 debugNewThread |= (teamDebugFlags & B_TEAM_DEBUG_STOP_NEW_THREADS) != 0;
1075 if (debugNewThread
1076 && (teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) != 0) {
1077 thread->debug_info.flags |= B_THREAD_DEBUG_STOP;
1078 }
1079 }
1080
1081 {
1082 SpinLocker signalLocker(team->signal_lock);
1083 SpinLocker timeLocker(team->time_lock);
1084
1085 // insert thread into team
1086 insert_thread_into_team(team, thread);
1087 }
1088
1089 threadHashLocker.Unlock();
1090 threadCreationLocker.Unlock();
1091 threadLocker.Unlock();
1092 teamLocker.Unlock();
1093
1094 // notify listeners
1095 sNotificationService.Notify(THREAD_ADDED, thread);
1096
1097 return thread->id;
1098 }
1099
1100
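/*! Entry point of the "undertaker" kernel thread.

	Waits for dying threads to be queued on sUndertakerEntries (announced via
	sUndertakerCondition), removes each of them from the kernel team, and
	releases the last reference to the Thread object, which frees it.
*/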
1101 static status_t
1102 undertaker(void* /*args*/)
1103 {
1104 while (true) {
1105 // wait for a thread to bury
1106 InterruptsSpinLocker locker(sUndertakerLock);
1107
1108 while (sUndertakerEntries.IsEmpty()) {
1109 ConditionVariableEntry conditionEntry;
1110 sUndertakerCondition.Add(&conditionEntry);
1111 locker.Unlock();
1112
1113 conditionEntry.Wait();
1114
1115 locker.Lock();
1116 }
1117
1118 UndertakerEntry* _entry = sUndertakerEntries.RemoveHead();
1119 locker.Unlock();
1120
1121 UndertakerEntry entry = *_entry;
1122 // we need a copy, since the original entry is on the thread's stack
1123
1124 // we've got an entry
1125 Thread* thread = entry.thread;
1126
1127 // make sure the thread isn't running anymore
1128 InterruptsSpinLocker schedulerLocker(thread->scheduler_lock);
1129 ASSERT(thread->state == THREAD_STATE_FREE_ON_RESCHED);
1130 schedulerLocker.Unlock();
1131
1132 // remove this thread from the kernel team -- this makes it
1133 // inaccessible
1134 Team* kernelTeam = team_get_kernel_team();
1135 TeamLocker kernelTeamLocker(kernelTeam);
1136 thread->Lock();
1137
1138 InterruptsSpinLocker threadCreationLocker(gThreadCreationLock);
1139 SpinLocker signalLocker(kernelTeam->signal_lock);
1140 SpinLocker timeLocker(kernelTeam->time_lock);
1141
1142 remove_thread_from_team(kernelTeam, thread);
1143
1144 timeLocker.Unlock();
1145 signalLocker.Unlock();
1146 threadCreationLocker.Unlock();
1147
1148 kernelTeamLocker.Unlock();
1149
1150 // free the thread structure
1151 thread->UnlockAndReleaseReference();
1152 }
1153
1154 // can never get here
1155 return B_OK;
1156 }
1157
1158
1159 /*! Returns the semaphore the thread is currently waiting on.
1160
1161 The return value is purely informative.
1162 The caller must hold the scheduler lock.
1163
1164 \param thread The thread.
1165 \return The ID of the semaphore the thread is currently waiting on or \c -1,
1166 if it isn't waiting on a semaphore.
1167 */
1168 static sem_id
1169 get_thread_wait_sem(Thread* thread)
1170 {
1171 if (thread->state == B_THREAD_WAITING
1172 && thread->wait.type == THREAD_BLOCK_TYPE_SEMAPHORE) {
1173 return (sem_id)(addr_t)thread->wait.object;
1174 }
1175 return -1;
1176 }
1177
1178
1179 /*! Fills the thread_info structure with information from the specified thread.
1180 The caller must hold the thread's lock and the scheduler lock.
1181 */
1182 static void
1183 fill_thread_info(Thread *thread, thread_info *info, size_t size)
1184 {
1185 info->thread = thread->id;
1186 info->team = thread->team->id;
1187
1188 strlcpy(info->name, thread->name, B_OS_NAME_LENGTH);
1189
1190 info->sem = -1;
1191
1192 if (thread->state == B_THREAD_WAITING) {
1193 info->state = B_THREAD_WAITING;
1194
1195 switch (thread->wait.type) {
1196 case THREAD_BLOCK_TYPE_SNOOZE:
1197 info->state = B_THREAD_ASLEEP;
1198 break;
1199
1200 case THREAD_BLOCK_TYPE_SEMAPHORE:
1201 {
1202 sem_id sem = (sem_id)(addr_t)thread->wait.object;
1203 if (sem == thread->msg.read_sem)
1204 info->state = B_THREAD_RECEIVING;
1205 else
1206 info->sem = sem;
1207 break;
1208 }
1209
1210 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1211 default:
1212 break;
1213 }
1214 } else
1215 info->state = (thread_state)thread->state;
1216
1217 info->priority = thread->priority;
1218 info->stack_base = (void *)thread->user_stack_base;
1219 info->stack_end = (void *)(thread->user_stack_base
1220 + thread->user_stack_size);
1221
1222 InterruptsSpinLocker threadTimeLocker(thread->time_lock);
1223 info->user_time = thread->user_time;
1224 info->kernel_time = thread->kernel_time;
1225 if (thread->last_time != 0) {
1226 const bigtime_t current = system_time() - thread->last_time;
1227 if (thread->in_kernel)
1228 info->kernel_time += current;
1229 else
1230 info->user_time += current;
1231 }
1232 }
1233
1234
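/*! Delivers a message (code plus optional buffer) to the target thread.

	The per-thread message slot is guarded by two semaphores: msg.write_sem
	(initialized to 1) is acquired by the sender, so only one message can be
	pending at a time, and msg.read_sem is released once the message has been
	stored, waking up a thread blocked in receive_data_etc(), which in turn
	releases msg.write_sem again after consuming the message.
*/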
1235 static status_t
1236 send_data_etc(thread_id id, int32 code, const void *buffer, size_t bufferSize,
1237 int32 flags)
1238 {
1239 // get the thread
1240 Thread *target = Thread::Get(id);
1241 if (target == NULL)
1242 return B_BAD_THREAD_ID;
1243 BReference<Thread> targetReference(target, true);
1244
1245 // get the write semaphore
1246 ThreadLocker targetLocker(target);
1247 sem_id cachedSem = target->msg.write_sem;
1248 targetLocker.Unlock();
1249
1250 if (bufferSize > THREAD_MAX_MESSAGE_SIZE)
1251 return B_NO_MEMORY;
1252
1253 status_t status = acquire_sem_etc(cachedSem, 1, flags, 0);
1254 if (status == B_INTERRUPTED) {
1255 // we got interrupted by a signal
1256 return status;
1257 }
1258 if (status != B_OK) {
1259 // Any other acquisition problems may be due to thread deletion
1260 return B_BAD_THREAD_ID;
1261 }
1262
1263 void* data;
1264 if (bufferSize > 0) {
1265 data = malloc(bufferSize);
1266 if (data == NULL)
1267 return B_NO_MEMORY;
1268 if (user_memcpy(data, buffer, bufferSize) != B_OK) {
1269 free(data);
1270 return B_BAD_DATA;
1271 }
1272 } else
1273 data = NULL;
1274
1275 targetLocker.Lock();
1276
1277 // The target thread could have been deleted at this point.
1278 if (!target->IsAlive()) {
1279 targetLocker.Unlock();
1280 free(data);
1281 return B_BAD_THREAD_ID;
1282 }
1283
1284 // Save message information
1285 target->msg.sender = thread_get_current_thread()->id;
1286 target->msg.code = code;
1287 target->msg.size = bufferSize;
1288 target->msg.buffer = data;
1289 cachedSem = target->msg.read_sem;
1290
1291 targetLocker.Unlock();
1292
1293 release_sem(cachedSem);
1294 return B_OK;
1295 }
1296
1297
1298 static int32
1299 receive_data_etc(thread_id *_sender, void *buffer, size_t bufferSize,
1300 int32 flags)
1301 {
1302 Thread *thread = thread_get_current_thread();
1303 size_t size;
1304 int32 code;
1305
1306 status_t status = acquire_sem_etc(thread->msg.read_sem, 1, flags, 0);
1307 if (status != B_OK) {
1308 // Actually, we're not supposed to return error codes
1309 // but since the only reason this can fail is that we
1310 // were killed, it's probably okay to do so (but also
1311 // meaningless).
1312 return status;
1313 }
1314
1315 if (buffer != NULL && bufferSize != 0 && thread->msg.buffer != NULL) {
1316 size = min_c(bufferSize, thread->msg.size);
1317 status = user_memcpy(buffer, thread->msg.buffer, size);
1318 if (status != B_OK) {
1319 free(thread->msg.buffer);
1320 release_sem(thread->msg.write_sem);
1321 return status;
1322 }
1323 }
1324
1325 *_sender = thread->msg.sender;
1326 code = thread->msg.code;
1327
1328 free(thread->msg.buffer);
1329 release_sem(thread->msg.write_sem);
1330
1331 return code;
1332 }
1333
1334
1335 static status_t
1336 common_getrlimit(int resource, struct rlimit * rlp)
1337 {
1338 if (!rlp)
1339 return B_BAD_ADDRESS;
1340
1341 switch (resource) {
1342 case RLIMIT_AS:
1343 rlp->rlim_cur = __HAIKU_ADDR_MAX;
1344 rlp->rlim_max = __HAIKU_ADDR_MAX;
1345 return B_OK;
1346
1347 case RLIMIT_CORE:
1348 rlp->rlim_cur = 0;
1349 rlp->rlim_max = 0;
1350 return B_OK;
1351
1352 case RLIMIT_DATA:
1353 rlp->rlim_cur = RLIM_INFINITY;
1354 rlp->rlim_max = RLIM_INFINITY;
1355 return B_OK;
1356
1357 case RLIMIT_NOFILE:
1358 case RLIMIT_NOVMON:
1359 return vfs_getrlimit(resource, rlp);
1360
1361 case RLIMIT_STACK:
1362 {
1363 rlp->rlim_cur = USER_MAIN_THREAD_STACK_SIZE;
1364 rlp->rlim_max = USER_MAIN_THREAD_STACK_SIZE;
1365 return B_OK;
1366 }
1367
1368 default:
1369 return EINVAL;
1370 }
1371
1372 return B_OK;
1373 }
1374
1375
1376 static status_t
1377 common_setrlimit(int resource, const struct rlimit * rlp)
1378 {
1379 if (!rlp)
1380 return B_BAD_ADDRESS;
1381
1382 switch (resource) {
1383 case RLIMIT_CORE:
1384 // We don't support core files, so only allow setting the limits to 0/0.
1385 if (rlp->rlim_cur != 0 || rlp->rlim_max != 0)
1386 return EINVAL;
1387 return B_OK;
1388
1389 case RLIMIT_NOFILE:
1390 case RLIMIT_NOVMON:
1391 return vfs_setrlimit(resource, rlp);
1392
1393 default:
1394 return EINVAL;
1395 }
1396
1397 return B_OK;
1398 }
1399
1400
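/*! Common backend for the snooze_etc() family of calls.

	Blocks the calling thread according to \a timeout, \a clockID, and
	\a flags. Only CLOCK_REALTIME and CLOCK_MONOTONIC are supported. If the
	sleep is interrupted by a signal and \a _remainingTime is not \c NULL, the
	time still left until the timeout is stored there.
*/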
1401 static status_t
1402 common_snooze_etc(bigtime_t timeout, clockid_t clockID, uint32 flags,
1403 bigtime_t* _remainingTime)
1404 {
1405 #if KDEBUG
1406 if (!are_interrupts_enabled()) {
1407 panic("common_snooze_etc(): called with interrupts disabled, timeout "
1408 "%" B_PRIdBIGTIME, timeout);
1409 }
1410 #endif
1411
1412 switch (clockID) {
1413 case CLOCK_REALTIME:
1414 // make sure the B_TIMEOUT_REAL_TIME_BASE flag is set and fall
1415 // through
1416 flags |= B_TIMEOUT_REAL_TIME_BASE;
1417 case CLOCK_MONOTONIC:
1418 {
1419 // Store the start time, for the case that we get interrupted and
1420 // need to return the remaining time. For absolute timeouts we can
1421 // still get the time later, if needed.
1422 bigtime_t startTime
1423 = _remainingTime != NULL && (flags & B_RELATIVE_TIMEOUT) != 0
1424 ? system_time() : 0;
1425
1426 Thread* thread = thread_get_current_thread();
1427
1428 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_SNOOZE,
1429 NULL);
1430 status_t status = thread_block_with_timeout(flags, timeout);
1431
1432 if (status == B_TIMED_OUT || status == B_WOULD_BLOCK)
1433 return B_OK;
1434
1435 // If interrupted, compute the remaining time, if requested.
1436 if (status == B_INTERRUPTED && _remainingTime != NULL) {
1437 if ((flags & B_RELATIVE_TIMEOUT) != 0) {
1438 *_remainingTime = std::max(
1439 startTime + timeout - system_time(), (bigtime_t)0);
1440 } else {
1441 bigtime_t now = (flags & B_TIMEOUT_REAL_TIME_BASE) != 0
1442 ? real_time_clock_usecs() : system_time();
1443 *_remainingTime = std::max(timeout - now, (bigtime_t)0);
1444 }
1445 }
1446
1447 return status;
1448 }
1449
1450 case CLOCK_THREAD_CPUTIME_ID:
1451 // Waiting for ourselves to do something isn't particularly
1452 // productive.
1453 return B_BAD_VALUE;
1454
1455 case CLOCK_PROCESS_CPUTIME_ID:
1456 default:
1457 // We don't have to support those, but we are allowed to. Could be
1458 // done by creating a UserTimer on the fly with a custom UserEvent
1459 // that would just wake us up.
1460 return ENOTSUP;
1461 }
1462 }
1463
1464
1465 // #pragma mark - debugger calls
1466
1467
1468 static int
1469 make_thread_unreal(int argc, char **argv)
1470 {
1471 int32 id = -1;
1472
1473 if (argc > 2) {
1474 print_debugger_command_usage(argv[0]);
1475 return 0;
1476 }
1477
1478 if (argc > 1)
1479 id = strtoul(argv[1], NULL, 0);
1480
1481 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1482 Thread* thread = it.Next();) {
1483 if (id != -1 && thread->id != id)
1484 continue;
1485
1486 if (thread->priority > B_DISPLAY_PRIORITY) {
1487 scheduler_set_thread_priority(thread, B_NORMAL_PRIORITY);
1488 kprintf("thread %" B_PRId32 " made unreal\n", thread->id);
1489 }
1490 }
1491
1492 return 0;
1493 }
1494
1495
1496 static int
1497 set_thread_prio(int argc, char **argv)
1498 {
1499 int32 id;
1500 int32 prio;
1501
1502 if (argc > 3 || argc < 2) {
1503 print_debugger_command_usage(argv[0]);
1504 return 0;
1505 }
1506
1507 prio = strtoul(argv[1], NULL, 0);
1508 if (prio > THREAD_MAX_SET_PRIORITY)
1509 prio = THREAD_MAX_SET_PRIORITY;
1510 if (prio < THREAD_MIN_SET_PRIORITY)
1511 prio = THREAD_MIN_SET_PRIORITY;
1512
1513 if (argc > 2)
1514 id = strtoul(argv[2], NULL, 0);
1515 else
1516 id = thread_get_current_thread()->id;
1517
1518 bool found = false;
1519 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1520 Thread* thread = it.Next();) {
1521 if (thread->id != id)
1522 continue;
1523 scheduler_set_thread_priority(thread, prio);
1524 kprintf("thread %" B_PRId32 " set to priority %" B_PRId32 "\n", id, prio);
1525 found = true;
1526 break;
1527 }
1528 if (!found)
1529 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1530
1531 return 0;
1532 }
1533
1534
1535 static int
1536 make_thread_suspended(int argc, char **argv)
1537 {
1538 int32 id;
1539
1540 if (argc > 2) {
1541 print_debugger_command_usage(argv[0]);
1542 return 0;
1543 }
1544
1545 if (argc == 1)
1546 id = thread_get_current_thread()->id;
1547 else
1548 id = strtoul(argv[1], NULL, 0);
1549
1550 bool found = false;
1551 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1552 Thread* thread = it.Next();) {
1553 if (thread->id != id)
1554 continue;
1555
1556 Signal signal(SIGSTOP, SI_USER, B_OK, team_get_kernel_team()->id);
1557 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
1558
1559 kprintf("thread %" B_PRId32 " suspended\n", id);
1560 found = true;
1561 break;
1562 }
1563 if (!found)
1564 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1565
1566 return 0;
1567 }
1568
1569
1570 static int
1571 make_thread_resumed(int argc, char **argv)
1572 {
1573 int32 id;
1574
1575 if (argc != 2) {
1576 print_debugger_command_usage(argv[0]);
1577 return 0;
1578 }
1579
1580 // force user to enter a thread id, as using
1581 // the current thread is usually not intended
1582 id = strtoul(argv[1], NULL, 0);
1583
1584 bool found = false;
1585 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1586 Thread* thread = it.Next();) {
1587 if (thread->id != id)
1588 continue;
1589
1590 if (thread->state == B_THREAD_SUSPENDED || thread->state == B_THREAD_ASLEEP
1591 || thread->state == B_THREAD_WAITING) {
1592 scheduler_enqueue_in_run_queue(thread);
1593 kprintf("thread %" B_PRId32 " resumed\n", thread->id);
1594 } else
1595 kprintf("thread %" B_PRId32 " is already running\n", thread->id);
1596 found = true;
1597 break;
1598 }
1599 if (!found)
1600 kprintf("thread %" B_PRId32 " (%#" B_PRIx32 ") not found\n", id, id);
1601
1602 return 0;
1603 }
1604
1605
1606 static int
1607 drop_into_debugger(int argc, char **argv)
1608 {
1609 status_t err;
1610 int32 id;
1611
1612 if (argc > 2) {
1613 print_debugger_command_usage(argv[0]);
1614 return 0;
1615 }
1616
1617 if (argc == 1)
1618 id = thread_get_current_thread()->id;
1619 else
1620 id = strtoul(argv[1], NULL, 0);
1621
1622 err = _user_debug_thread(id);
1623 // TODO: This is a non-trivial syscall doing some locking, so this is
1624 // really nasty and may go seriously wrong.
1625 if (err)
1626 kprintf("drop failed\n");
1627 else
1628 kprintf("thread %" B_PRId32 " dropped into user debugger\n", id);
1629
1630 return 0;
1631 }
1632
1633
1634 /*! Returns a user-readable string for a thread state.
1635 Only for use in the kernel debugger.
1636 */
1637 static const char *
1638 state_to_text(Thread *thread, int32 state)
1639 {
1640 switch (state) {
1641 case B_THREAD_READY:
1642 return "ready";
1643
1644 case B_THREAD_RUNNING:
1645 return "running";
1646
1647 case B_THREAD_WAITING:
1648 {
1649 if (thread != NULL) {
1650 switch (thread->wait.type) {
1651 case THREAD_BLOCK_TYPE_SNOOZE:
1652 return "zzz";
1653
1654 case THREAD_BLOCK_TYPE_SEMAPHORE:
1655 {
1656 sem_id sem = (sem_id)(addr_t)thread->wait.object;
1657 if (sem == thread->msg.read_sem)
1658 return "receive";
1659 break;
1660 }
1661 }
1662 }
1663
1664 return "waiting";
1665 }
1666
1667 case B_THREAD_SUSPENDED:
1668 return "suspended";
1669
1670 case THREAD_STATE_FREE_ON_RESCHED:
1671 return "death";
1672
1673 default:
1674 return "UNKNOWN";
1675 }
1676 }
1677
1678
1679 static void
1680 print_thread_list_table_head()
1681 {
1682 kprintf("%-*s id state wait for %-*s cpu pri %-*s team "
1683 "name\n",
1684 B_PRINTF_POINTER_WIDTH, "thread", B_PRINTF_POINTER_WIDTH, "object",
1685 B_PRINTF_POINTER_WIDTH, "stack");
1686 }
1687
1688
1689 static void
1690 _dump_thread_info(Thread *thread, bool shortInfo)
1691 {
1692 if (shortInfo) {
1693 kprintf("%p %6" B_PRId32 " %-10s", thread, thread->id,
1694 state_to_text(thread, thread->state));
1695
1696 // does it block on a semaphore or a condition variable?
1697 if (thread->state == B_THREAD_WAITING) {
1698 switch (thread->wait.type) {
1699 case THREAD_BLOCK_TYPE_SEMAPHORE:
1700 {
1701 sem_id sem = (sem_id)(addr_t)thread->wait.object;
1702 if (sem == thread->msg.read_sem)
1703 kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
1704 else {
1705 kprintf("sem %-*" B_PRId32,
1706 B_PRINTF_POINTER_WIDTH + 5, sem);
1707 }
1708 break;
1709 }
1710
1711 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1712 {
1713 char name[5];
1714 ssize_t length = ConditionVariable::DebugGetType(
1715 (ConditionVariable*)thread->wait.object, name, sizeof(name));
1716 if (length > 0)
1717 kprintf("cvar:%*s %p ", 4, name, thread->wait.object);
1718 else
1719 kprintf("cvar %p ", thread->wait.object);
1720 break;
1721 }
1722
1723 case THREAD_BLOCK_TYPE_SNOOZE:
1724 kprintf("%*s", B_PRINTF_POINTER_WIDTH + 15, "");
1725 break;
1726
1727 case THREAD_BLOCK_TYPE_SIGNAL:
1728 kprintf("signal%*s", B_PRINTF_POINTER_WIDTH + 9, "");
1729 break;
1730
1731 case THREAD_BLOCK_TYPE_MUTEX:
1732 kprintf("mutex %p ", thread->wait.object);
1733 break;
1734
1735 case THREAD_BLOCK_TYPE_RW_LOCK:
1736 kprintf("rwlock %p ", thread->wait.object);
1737 break;
1738
1739 case THREAD_BLOCK_TYPE_USER:
1740 kprintf("user%*s", B_PRINTF_POINTER_WIDTH + 11, "");
1741 break;
1742
1743 case THREAD_BLOCK_TYPE_OTHER:
1744 kprintf("other%*s", B_PRINTF_POINTER_WIDTH + 10, "");
1745 break;
1746
1747 case THREAD_BLOCK_TYPE_OTHER_OBJECT:
1748 kprintf("other %p ", thread->wait.object);
1749 break;
1750
1751 default:
1752 kprintf("??? %p ", thread->wait.object);
1753 break;
1754 }
1755 } else
1756 kprintf("-%*s", B_PRINTF_POINTER_WIDTH + 14, "");
1757
1758 // on which CPU does it run?
1759 if (thread->cpu)
1760 kprintf("%2d", thread->cpu->cpu_num);
1761 else
1762 kprintf(" -");
1763
1764 kprintf("%4" B_PRId32 " %p%5" B_PRId32 " %s\n", thread->priority,
1765 (void *)thread->kernel_stack_base, thread->team->id, thread->name);
1766
1767 return;
1768 }
1769
1770 // print the long info
1771
1772 struct thread_death_entry *death = NULL;
1773
1774 kprintf("THREAD: %p\n", thread);
1775 kprintf("id: %" B_PRId32 " (%#" B_PRIx32 ")\n", thread->id,
1776 thread->id);
1777 kprintf("serial_number: %" B_PRId64 "\n", thread->serial_number);
1778 kprintf("name: \"%s\"\n", thread->name);
1779 kprintf("hash_next: %p\nteam_next: %p\n",
1780 thread->hash_next, thread->team_next);
1781 kprintf("priority: %" B_PRId32 " (I/O: %" B_PRId32 ")\n",
1782 thread->priority, thread->io_priority);
1783 kprintf("state: %s\n", state_to_text(thread, thread->state));
1784 kprintf("cpu: %p ", thread->cpu);
1785 if (thread->cpu)
1786 kprintf("(%d)\n", thread->cpu->cpu_num);
1787 else
1788 kprintf("\n");
1789 kprintf("cpumask: %#" B_PRIx32 "\n", thread->cpumask.Bits(0));
1790 kprintf("sig_pending: %#" B_PRIx64 " (blocked: %#" B_PRIx64
1791 ", before sigsuspend(): %#" B_PRIx64 ")\n",
1792 (int64)thread->ThreadPendingSignals(),
1793 (int64)thread->sig_block_mask,
1794 (int64)thread->sigsuspend_original_unblocked_mask);
1795 kprintf("in_kernel: %d\n", thread->in_kernel);
1796
1797 if (thread->state == B_THREAD_WAITING) {
1798 kprintf("waiting for: ");
1799
1800 switch (thread->wait.type) {
1801 case THREAD_BLOCK_TYPE_SEMAPHORE:
1802 {
1803 sem_id sem = (sem_id)(addr_t)thread->wait.object;
1804 if (sem == thread->msg.read_sem)
1805 kprintf("data\n");
1806 else
1807 kprintf("semaphore %" B_PRId32 "\n", sem);
1808 break;
1809 }
1810
1811 case THREAD_BLOCK_TYPE_CONDITION_VARIABLE:
1812 kprintf("condition variable %p\n", thread->wait.object);
1813 break;
1814
1815 case THREAD_BLOCK_TYPE_SNOOZE:
1816 kprintf("snooze()\n");
1817 break;
1818
1819 case THREAD_BLOCK_TYPE_SIGNAL:
1820 kprintf("signal\n");
1821 break;
1822
1823 case THREAD_BLOCK_TYPE_MUTEX:
1824 kprintf("mutex %p\n", thread->wait.object);
1825 break;
1826
1827 case THREAD_BLOCK_TYPE_RW_LOCK:
1828 kprintf("rwlock %p\n", thread->wait.object);
1829 break;
1830
1831 case THREAD_BLOCK_TYPE_USER:
1832 kprintf("user\n");
1833 break;
1834
1835 case THREAD_BLOCK_TYPE_OTHER:
1836 kprintf("other (%s)\n", (char*)thread->wait.object);
1837 break;
1838
1839 case THREAD_BLOCK_TYPE_OTHER_OBJECT:
1840 kprintf("other (%p)\n", thread->wait.object);
1841 break;
1842
1843 default:
1844 kprintf("unknown (%p)\n", thread->wait.object);
1845 break;
1846 }
1847 }
1848
1849 kprintf("fault_handler: %p\n", (void *)thread->fault_handler);
1850 kprintf("team: %p, \"%s\"\n", thread->team,
1851 thread->team->Name());
1852 kprintf(" exit.sem: %" B_PRId32 "\n", thread->exit.sem);
1853 kprintf(" exit.status: %#" B_PRIx32 " (%s)\n", thread->exit.status,
1854 strerror(thread->exit.status));
1855 kprintf(" exit.waiters:\n");
1856 while ((death = (struct thread_death_entry*)list_get_next_item(
1857 &thread->exit.waiters, death)) != NULL) {
1858 kprintf("\t%p (thread %" B_PRId32 ")\n", death, death->thread);
1859 }
1860
1861 kprintf("kernel_stack_area: %" B_PRId32 "\n", thread->kernel_stack_area);
1862 kprintf("kernel_stack_base: %p\n", (void *)thread->kernel_stack_base);
1863 kprintf("user_stack_area: %" B_PRId32 "\n", thread->user_stack_area);
1864 kprintf("user_stack_base: %p\n", (void *)thread->user_stack_base);
1865 kprintf("user_local_storage: %p\n", (void *)thread->user_local_storage);
1866 kprintf("user_thread: %p\n", (void *)thread->user_thread);
1867 kprintf("kernel_errno: %#x (%s)\n", thread->kernel_errno,
1868 strerror(thread->kernel_errno));
1869 kprintf("kernel_time: %" B_PRId64 "\n", thread->kernel_time);
1870 kprintf("user_time: %" B_PRId64 "\n", thread->user_time);
1871 kprintf("flags: 0x%" B_PRIx32 "\n", thread->flags);
1872 kprintf("architecture dependant section:\n");
1873 arch_thread_dump_info(&thread->arch_info);
1874 kprintf("scheduler data:\n");
1875 scheduler_dump_thread_data(thread);
1876 }
1877
1878
1879 static int
1880 dump_thread_info(int argc, char **argv)
1881 {
1882 bool shortInfo = false;
1883 int argi = 1;
1884 if (argi < argc && strcmp(argv[argi], "-s") == 0) {
1885 shortInfo = true;
1886 print_thread_list_table_head();
1887 argi++;
1888 }
1889
1890 if (argi == argc) {
1891 _dump_thread_info(thread_get_current_thread(), shortInfo);
1892 return 0;
1893 }
1894
1895 for (; argi < argc; argi++) {
1896 const char *name = argv[argi];
1897 ulong arg = strtoul(name, NULL, 0);
1898
1899 if (IS_KERNEL_ADDRESS(arg)) {
1900 // semi-hack
1901 _dump_thread_info((Thread *)arg, shortInfo);
1902 continue;
1903 }
1904
1905 // walk through the thread list, trying to match name or id
1906 bool found = false;
1907 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1908 Thread* thread = it.Next();) {
1909 if (!strcmp(name, thread->name) || thread->id == (thread_id)arg) {
1910 _dump_thread_info(thread, shortInfo);
1911 found = true;
1912 break;
1913 }
1914 }
1915
1916 if (!found)
1917 kprintf("thread \"%s\" (%" B_PRId32 ") doesn't exist!\n", name, (thread_id)arg);
1918 }
1919
1920 return 0;
1921 }
1922
1923
1924 static int
1925 dump_thread_list(int argc, char **argv)
1926 {
1927 bool realTimeOnly = false;
1928 bool calling = false;
1929 const char *callSymbol = NULL;
1930 addr_t callStart = 0;
1931 addr_t callEnd = 0;
1932 int32 requiredState = 0;
1933 team_id team = -1;
1934 sem_id sem = -1;
1935
1936 if (!strcmp(argv[0], "realtime"))
1937 realTimeOnly = true;
1938 else if (!strcmp(argv[0], "ready"))
1939 requiredState = B_THREAD_READY;
1940 else if (!strcmp(argv[0], "running"))
1941 requiredState = B_THREAD_RUNNING;
1942 else if (!strcmp(argv[0], "waiting")) {
1943 requiredState = B_THREAD_WAITING;
1944
1945 if (argc > 1) {
1946 sem = strtoul(argv[1], NULL, 0);
1947 if (sem == 0)
1948 kprintf("ignoring invalid semaphore argument.\n");
1949 }
1950 } else if (!strcmp(argv[0], "calling")) {
1951 if (argc < 2) {
1952 kprintf("Need to give a symbol name or start and end arguments.\n");
1953 return 0;
1954 } else if (argc == 3) {
1955 callStart = parse_expression(argv[1]);
1956 callEnd = parse_expression(argv[2]);
1957 } else
1958 callSymbol = argv[1];
1959
1960 calling = true;
1961 } else if (argc > 1) {
1962 team = strtoul(argv[1], NULL, 0);
1963 if (team == 0)
1964 kprintf("ignoring invalid team argument.\n");
1965 }
1966
1967 print_thread_list_table_head();
1968
1969 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
1970 Thread* thread = it.Next();) {
1971 // filter out threads not matching the search criteria
1972 if ((requiredState && thread->state != requiredState)
1973 || (calling && !arch_debug_contains_call(thread, callSymbol,
1974 callStart, callEnd))
1975 || (sem > 0 && get_thread_wait_sem(thread) != sem)
1976 || (team > 0 && thread->team->id != team)
1977 || (realTimeOnly && thread->priority < B_REAL_TIME_DISPLAY_PRIORITY))
1978 continue;
1979
1980 _dump_thread_info(thread, true);
1981 }
1982 return 0;
1983 }
1984
1985
1986 static void
update_thread_sigmask_on_exit(Thread* thread)
1988 {
1989 if ((thread->flags & THREAD_FLAGS_OLD_SIGMASK) != 0) {
1990 thread->flags &= ~THREAD_FLAGS_OLD_SIGMASK;
1991 sigprocmask(SIG_SETMASK, &thread->old_sig_block_mask, NULL);
1992 }
1993 }
1994
1995
1996 // #pragma mark - private kernel API
1997
1998
1999 void
thread_exit(void)
2001 {
2002 cpu_status state;
2003 Thread* thread = thread_get_current_thread();
2004 Team* team = thread->team;
2005 Team* kernelTeam = team_get_kernel_team();
2006 status_t status;
2007 struct thread_debug_info debugInfo;
2008 team_id teamID = team->id;
2009
2010 TRACE(("thread %" B_PRId32 " exiting w/return code %#" B_PRIx32 "\n",
2011 thread->id, thread->exit.status));
2012
2013 if (!are_interrupts_enabled())
2014 panic("thread_exit() called with interrupts disabled!\n");
2015
2016 // boost our priority to get this over with
2017 scheduler_set_thread_priority(thread, B_URGENT_DISPLAY_PRIORITY);
2018
2019 if (team != kernelTeam) {
2020 // Delete all user timers associated with the thread.
2021 ThreadLocker threadLocker(thread);
2022 thread->DeleteUserTimers(false);
2023
2024 // detach the thread's user thread
2025 user_thread* userThread = thread->user_thread;
2026 thread->user_thread = NULL;
2027
2028 threadLocker.Unlock();
2029
2030 // Delete the thread's user thread, if it's not the main thread. If it
2031 // is, we can save the work, since it will be deleted with the team's
2032 // address space.
2033 if (thread != team->main_thread)
2034 team_free_user_thread(team, userThread);
2035 }
2036
2037 // remember the user stack area -- we will delete it below
2038 area_id userStackArea = -1;
2039 if (team->address_space != NULL && thread->user_stack_area >= 0) {
2040 userStackArea = thread->user_stack_area;
2041 thread->user_stack_area = -1;
2042 }
2043
2044 struct job_control_entry *death = NULL;
2045 struct thread_death_entry* threadDeathEntry = NULL;
2046 bool deleteTeam = false;
2047 port_id debuggerPort = -1;
2048
2049 if (team != kernelTeam) {
2050 user_debug_thread_exiting(thread);
2051
2052 if (team->main_thread == thread) {
2053 // The main thread is exiting. Shut down the whole team.
2054 deleteTeam = true;
2055
2056 // kill off all other threads and the user debugger facilities
2057 debuggerPort = team_shutdown_team(team);
2058
2059 // acquire necessary locks, which are: process group lock, kernel
2060 // team lock, parent team lock, and the team lock
2061 team->LockProcessGroup();
2062 kernelTeam->Lock();
2063 team->LockTeamAndParent(true);
2064 } else {
2065 threadDeathEntry
2066 = (thread_death_entry*)malloc(sizeof(thread_death_entry));
2067
2068 // acquire necessary locks, which are: kernel team lock and the team
2069 // lock
2070 kernelTeam->Lock();
2071 team->Lock();
2072 }
2073
2074 ThreadLocker threadLocker(thread);
2075
2076 state = disable_interrupts();
2077
2078 // swap address spaces, to make sure we're running on the kernel's pgdir
2079 vm_swap_address_space(team->address_space, VMAddressSpace::Kernel());
2080
2081 WriteSpinLocker teamLocker(thread->team_lock);
2082 SpinLocker threadCreationLocker(gThreadCreationLock);
2083 // removing the thread and putting its death entry to the parent
2084 // team needs to be an atomic operation
2085
2086 // remember how long this thread lasted
2087 bigtime_t now = system_time();
2088
2089 InterruptsSpinLocker signalLocker(kernelTeam->signal_lock);
2090 SpinLocker teamTimeLocker(kernelTeam->time_lock);
2091 SpinLocker threadTimeLocker(thread->time_lock);
2092
2093 thread->kernel_time += now - thread->last_time;
2094 thread->last_time = now;
2095
2096 team->dead_threads_kernel_time += thread->kernel_time;
2097 team->dead_threads_user_time += thread->user_time;
2098
2099 // stop/update thread/team CPU time user timers
2100 if (thread->HasActiveCPUTimeUserTimers()
2101 || team->HasActiveCPUTimeUserTimers()) {
2102 user_timer_stop_cpu_timers(thread, NULL);
2103 }
2104
2105 // deactivate CPU time user timers for the thread
2106 if (thread->HasActiveCPUTimeUserTimers())
2107 thread->DeactivateCPUTimeUserTimers();
2108
2109 threadTimeLocker.Unlock();
2110
2111 // put the thread into the kernel team until it dies
2112 remove_thread_from_team(team, thread);
2113 insert_thread_into_team(kernelTeam, thread);
2114
2115 teamTimeLocker.Unlock();
2116 signalLocker.Unlock();
2117
2118 teamLocker.Unlock();
2119
2120 if (team->death_entry != NULL) {
2121 if (--team->death_entry->remaining_threads == 0)
2122 team->death_entry->condition.NotifyOne();
2123 }
2124
2125 if (deleteTeam) {
2126 Team* parent = team->parent;
2127
2128 // Set the team job control state to "dead" and detach the job
2129 // control entry from our team struct.
2130 team_set_job_control_state(team, JOB_CONTROL_STATE_DEAD, NULL);
2131 death = team->job_control_entry;
2132 team->job_control_entry = NULL;
2133
2134 if (death != NULL) {
2135 death->InitDeadState();
2136
2137 // team_set_job_control_state() already moved our entry
2138 // into the parent's list. We just check the soft limit of
2139 // death entries.
2140 if (parent->dead_children.count > MAX_DEAD_CHILDREN) {
2141 death = parent->dead_children.entries.RemoveHead();
2142 parent->dead_children.count--;
2143 } else
2144 death = NULL;
2145 }
2146
2147 threadCreationLocker.Unlock();
2148 restore_interrupts(state);
2149
2150 threadLocker.Unlock();
2151
2152 // Get a temporary reference to the team's process group
2153 // -- team_remove_team() removes the team from the group, which
2154 // might destroy it otherwise and we wouldn't be able to unlock it.
2155 ProcessGroup* group = team->group;
2156 group->AcquireReference();
2157
2158 pid_t foregroundGroupToSignal;
2159 team_remove_team(team, foregroundGroupToSignal);
2160
2161 // unlock everything but the parent team
2162 team->Unlock();
2163 if (parent != kernelTeam)
2164 kernelTeam->Unlock();
2165 group->Unlock();
2166 group->ReleaseReference();
2167
2168 // Send SIGCHLD to the parent as long as we still have its lock.
2169 // This makes job control state change + signalling atomic.
2170 Signal childSignal(SIGCHLD, team->exit.reason, B_OK, team->id);
2171 if (team->exit.reason == CLD_EXITED) {
2172 childSignal.SetStatus(team->exit.status);
2173 } else {
2174 childSignal.SetStatus(team->exit.signal);
2175 childSignal.SetSendingUser(team->exit.signaling_user);
2176 }
2177 send_signal_to_team(parent, childSignal, B_DO_NOT_RESCHEDULE);
2178
2179 // also unlock the parent
2180 parent->Unlock();
2181
2182 // If the team was a session leader with controlling TTY, we have
2183 // to send SIGHUP to the foreground process group.
2184 if (foregroundGroupToSignal >= 0) {
2185 Signal groupSignal(SIGHUP, SI_USER, B_OK, team->id);
2186 send_signal_to_process_group(foregroundGroupToSignal,
2187 groupSignal, B_DO_NOT_RESCHEDULE);
2188 }
2189 } else {
2190 // The thread is not the main thread. We store a thread death entry
2191 // for it, unless someone is already waiting for it.
2192 if (threadDeathEntry != NULL
2193 && list_is_empty(&thread->exit.waiters)) {
2194 threadDeathEntry->thread = thread->id;
2195 threadDeathEntry->status = thread->exit.status;
2196
2197 // add entry to dead thread list
2198 list_add_item(&team->dead_threads, threadDeathEntry);
2199 }
2200
2201 threadCreationLocker.Unlock();
2202 restore_interrupts(state);
2203
2204 threadLocker.Unlock();
2205 team->Unlock();
2206 kernelTeam->Unlock();
2207 }
2208
2209 TRACE(("thread_exit: thread %" B_PRId32 " now a kernel thread!\n",
2210 thread->id));
2211 }
2212
2213 // delete the team if we're its main thread
2214 if (deleteTeam) {
2215 team_delete_team(team, debuggerPort);
2216
2217 // we need to delete any death entry that made it to here
2218 delete death;
2219 }
2220
2221 ThreadLocker threadLocker(thread);
2222
2223 state = disable_interrupts();
2224 SpinLocker threadCreationLocker(gThreadCreationLock);
2225
2226 // mark invisible in global hash/list, so it's no longer accessible
2227 WriteSpinLocker threadHashLocker(sThreadHashLock);
2228 thread->visible = false;
2229 sUsedThreads--;
2230 threadHashLocker.Unlock();
2231
2232 // Stop debugging for this thread
2233 SpinLocker threadDebugInfoLocker(thread->debug_info.lock);
2234 debugInfo = thread->debug_info;
2235 clear_thread_debug_info(&thread->debug_info, true);
2236 threadDebugInfoLocker.Unlock();
2237
2238 // Remove the select infos. We notify them a little later.
2239 select_info* selectInfos = thread->select_infos;
2240 thread->select_infos = NULL;
2241
2242 threadCreationLocker.Unlock();
2243 restore_interrupts(state);
2244
2245 threadLocker.Unlock();
2246
2247 destroy_thread_debug_info(&debugInfo);
2248
2249 // notify select infos
2250 select_info* info = selectInfos;
2251 while (info != NULL) {
2252 select_sync* sync = info->sync;
2253
2254 select_info* next = info->next;
2255 notify_select_events(info, B_EVENT_INVALID);
2256 put_select_sync(sync);
2257 info = next;
2258 }
2259
2260 // notify listeners
2261 sNotificationService.Notify(THREAD_REMOVED, thread);
2262
2263 // shutdown the thread messaging
2264
2265 status = acquire_sem_etc(thread->msg.write_sem, 1, B_RELATIVE_TIMEOUT, 0);
2266 if (status == B_WOULD_BLOCK) {
2267 // there is data waiting for us, so let us eat it
2268 thread_id sender;
2269
2270 delete_sem(thread->msg.write_sem);
2271 // first, let's remove all possibly waiting writers
2272 receive_data_etc(&sender, NULL, 0, B_RELATIVE_TIMEOUT);
2273 } else {
2274 // we probably own the semaphore here, and we're the last to do so
2275 delete_sem(thread->msg.write_sem);
2276 }
2277 // now we can safely remove the msg.read_sem
2278 delete_sem(thread->msg.read_sem);
2279
2280 // fill all death entries and delete the sem that others will use to wait
2281 // for us
2282 {
2283 sem_id cachedExitSem = thread->exit.sem;
2284
2285 ThreadLocker threadLocker(thread);
2286
2287 // make sure no one will grab this semaphore again
2288 thread->exit.sem = -1;
2289
2290 // fill all death entries
2291 thread_death_entry* entry = NULL;
2292 while ((entry = (thread_death_entry*)list_get_next_item(
2293 &thread->exit.waiters, entry)) != NULL) {
2294 entry->status = thread->exit.status;
2295 }
2296
2297 threadLocker.Unlock();
2298
2299 delete_sem(cachedExitSem);
2300 }
2301
2302 // delete the user stack, if this was a user thread
2303 if (!deleteTeam && userStackArea >= 0) {
2304 // We postponed deleting the user stack until now, since this way all
2305 // notifications for the thread's death are out already and all other
2306 // threads waiting for this thread's death and some object on its stack
2307 // will wake up before we (try to) delete the stack area. Of most
2308 // relevance is probably the case where this is the main thread and
2309 // other threads use objects on its stack -- so we want them terminated
2310 // first.
2311 // When the team is deleted, all areas are deleted anyway, so we don't
2312 // need to do that explicitly in that case.
2313 vm_delete_area(teamID, userStackArea, true);
2314 }
2315
2316 // notify the debugger
2317 if (teamID != kernelTeam->id)
2318 user_debug_thread_deleted(teamID, thread->id, thread->exit.status);
2319
2320 // enqueue in the undertaker list and reschedule for the last time
2321 UndertakerEntry undertakerEntry(thread, teamID);
2322
2323 disable_interrupts();
2324
2325 SpinLocker schedulerLocker(thread->scheduler_lock);
2326
2327 SpinLocker undertakerLocker(sUndertakerLock);
2328 sUndertakerEntries.Add(&undertakerEntry);
2329 sUndertakerCondition.NotifyOne();
2330 undertakerLocker.Unlock();
2331
2332 scheduler_reschedule(THREAD_STATE_FREE_ON_RESCHED);
2333
2334 panic("never can get here\n");
2335 }
2336
2337
2338 /*! Called in the interrupt handler code when a thread enters
2339 the kernel for any reason.
2340 Only tracks time for now.
2341 Interrupts are disabled.
2342 */
2343 void
thread_at_kernel_entry(bigtime_t now)
2345 {
2346 Thread *thread = thread_get_current_thread();
2347
2348 TRACE(("thread_at_kernel_entry: entry thread %" B_PRId32 "\n", thread->id));
2349
2350 // track user time
2351 SpinLocker threadTimeLocker(thread->time_lock);
2352 thread->user_time += now - thread->last_time;
2353 thread->last_time = now;
2354 thread->in_kernel = true;
2355 threadTimeLocker.Unlock();
2356 }
2357
2358
/*! Called whenever a thread exits kernel space to user space.
Tracks time, handles signals, ...
Interrupts must be enabled. When the function returns, interrupts will be
disabled.
The function might not return; this happens, e.g., when the thread has
received a deadly signal.
*/
2366 void
thread_at_kernel_exit(void)
2368 {
2369 Thread *thread = thread_get_current_thread();
2370
2371 TRACE(("thread_at_kernel_exit: exit thread %" B_PRId32 "\n", thread->id));
2372
2373 handle_signals(thread);
2374
2375 disable_interrupts();
2376
2377 update_thread_sigmask_on_exit(thread);
2378
2379 // track kernel time
2380 bigtime_t now = system_time();
2381 SpinLocker threadTimeLocker(thread->time_lock);
2382 thread->in_kernel = false;
2383 thread->kernel_time += now - thread->last_time;
2384 thread->last_time = now;
2385 }
2386
2387
/*! The quick version of thread_at_kernel_exit(), for the case that no signals
are pending and no debugging shall be done.
Interrupts must be disabled.
*/
2392 void
thread_at_kernel_exit_no_signals(void)
2394 {
2395 Thread *thread = thread_get_current_thread();
2396
2397 TRACE(("thread_at_kernel_exit_no_signals: exit thread %" B_PRId32 "\n",
2398 thread->id));
2399
2400 update_thread_sigmask_on_exit(thread);
2401
2402 // track kernel time
2403 bigtime_t now = system_time();
2404 SpinLocker threadTimeLocker(thread->time_lock);
2405 thread->in_kernel = false;
2406 thread->kernel_time += now - thread->last_time;
2407 thread->last_time = now;
2408 }
2409
2410
2411 void
thread_reset_for_exec(void)
2413 {
2414 Thread* thread = thread_get_current_thread();
2415
2416 ThreadLocker threadLocker(thread);
2417
2418 // delete user-defined timers
2419 thread->DeleteUserTimers(true);
2420
2421 // cancel pre-defined timer
2422 if (UserTimer* timer = thread->UserTimerFor(USER_TIMER_REAL_TIME_ID))
2423 timer->Cancel();
2424
2425 // reset user_thread and user stack
2426 thread->user_thread = NULL;
2427 thread->user_stack_area = -1;
2428 thread->user_stack_base = 0;
2429 thread->user_stack_size = 0;
2430
2431 // reset signals
2432 thread->ResetSignalsOnExec();
2433
2434 // reset thread CPU time clock
2435 InterruptsSpinLocker timeLocker(thread->time_lock);
2436 thread->cpu_clock_offset = -thread->CPUTime(false);
2437 }
2438
2439
2440 thread_id
allocate_thread_id()
2442 {
2443 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
2444
2445 // find the next unused ID
2446 thread_id id;
2447 do {
2448 id = sNextThreadID++;
2449
2450 // deal with integer overflow
2451 if (sNextThreadID < 0)
2452 sNextThreadID = 2;
2453
2454 // check whether the ID is already in use
2455 } while (sThreadHash.Lookup(id, false) != NULL);
2456
2457 return id;
2458 }
2459
2460
2461 thread_id
peek_next_thread_id()
2463 {
2464 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
2465 return sNextThreadID;
2466 }
2467
2468
/*! Yields the CPU to other threads.
The thread will continue to run if no other thread is in the ready state,
and even when there are ready threads, it still has a good chance to keep
running if its priority is higher than theirs.
*/
2474 void
thread_yield(void)
2476 {
2477 Thread *thread = thread_get_current_thread();
2478 if (thread == NULL)
2479 return;
2480
2481 InterruptsSpinLocker _(thread->scheduler_lock);
2482
2483 thread->has_yielded = true;
2484 scheduler_reschedule(B_THREAD_READY);
2485 }
2486
2487
2488 void
thread_map(void (*function)(Thread* thread, void* data), void* data)
2490 {
2491 InterruptsWriteSpinLocker threadHashLocker(sThreadHashLock);
2492
2493 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
2494 Thread* thread = it.Next();) {
2495 function(thread, data);
2496 }
2497 }
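
// Usage sketch (illustrative only, not part of the original source): thread_map()
// calls the given function for every thread while holding the thread hash lock
// with interrupts disabled, so the callback must not block or reacquire that
// lock. The counter callback below is a hypothetical example:
//
//	static void
//	count_waiting_threads(Thread* thread, void* data)
//	{
//		int32* count = (int32*)data;
//		if (thread->state == B_THREAD_WAITING)
//			(*count)++;
//	}
//
//	int32 waitingThreads = 0;
//	thread_map(count_waiting_threads, &waitingThreads);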
2498
2499
2500 /*! Kernel private thread creation function.
2501 */
2502 thread_id
spawn_kernel_thread_etc(thread_func function, const char *name, int32 priority,
2504 void *arg, team_id team)
2505 {
2506 return thread_create_thread(
2507 ThreadCreationAttributes(function, name, priority, arg, team),
2508 true);
2509 }
2510
2511
2512 status_t
wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
2514 status_t *_returnCode)
2515 {
2516 if (id < 0)
2517 return B_BAD_THREAD_ID;
2518 if (id == thread_get_current_thread_id())
2519 return EDEADLK;
2520
2521 // get the thread, queue our death entry, and fetch the semaphore we have to
2522 // wait on
2523 sem_id exitSem = B_BAD_THREAD_ID;
2524 struct thread_death_entry death;
2525
2526 Thread* thread = Thread::GetAndLock(id);
2527 if (thread != NULL) {
2528 // remember the semaphore we have to wait on and place our death entry
2529 exitSem = thread->exit.sem;
2530 if (exitSem >= 0)
2531 list_add_link_to_head(&thread->exit.waiters, &death);
2532
2533 thread->UnlockAndReleaseReference();
2534
2535 if (exitSem < 0)
2536 return B_BAD_THREAD_ID;
2537 } else {
2538 // we couldn't find this thread -- maybe it's already gone, and we'll
2539 // find its death entry in our team
2540 Team* team = thread_get_current_thread()->team;
2541 TeamLocker teamLocker(team);
2542
2543 // check the child death entries first (i.e. main threads of child
2544 // teams)
2545 bool deleteEntry;
2546 job_control_entry* freeDeath
2547 = team_get_death_entry(team, id, &deleteEntry);
2548 if (freeDeath != NULL) {
2549 death.status = freeDeath->status;
2550 if (deleteEntry)
2551 delete freeDeath;
2552 } else {
2553 // check the thread death entries of the team (non-main threads)
2554 thread_death_entry* threadDeathEntry = NULL;
2555 while ((threadDeathEntry = (thread_death_entry*)list_get_next_item(
2556 &team->dead_threads, threadDeathEntry)) != NULL) {
2557 if (threadDeathEntry->thread == id) {
2558 list_remove_item(&team->dead_threads, threadDeathEntry);
2559 death.status = threadDeathEntry->status;
2560 free(threadDeathEntry);
2561 break;
2562 }
2563 }
2564
2565 if (threadDeathEntry == NULL)
2566 return B_BAD_THREAD_ID;
2567 }
2568
2569 // we found the thread's death entry in our team
2570 if (_returnCode)
2571 *_returnCode = death.status;
2572
2573 return B_OK;
2574 }
2575
2576 // we need to wait for the death of the thread
2577
2578 resume_thread(id);
2579 // make sure we don't wait forever on a suspended thread
2580
2581 status_t status = acquire_sem_etc(exitSem, 1, flags, timeout);
2582
2583 if (status == B_OK) {
2584 // this should never happen as the thread deletes the semaphore on exit
2585 panic("could acquire exit_sem for thread %" B_PRId32 "\n", id);
2586 } else if (status == B_BAD_SEM_ID) {
2587 // this is the way the thread normally exits
2588 status = B_OK;
2589 } else {
2590 // We were probably interrupted or the timeout occurred; we need to
2591 // remove our death entry now.
2592 thread = Thread::GetAndLock(id);
2593 if (thread != NULL) {
2594 list_remove_link(&death);
2595 thread->UnlockAndReleaseReference();
2596 } else {
2597 // The thread is already gone, so we need to wait uninterruptibly
2598 // for its exit semaphore to make sure our death entry stays valid.
2599 // It won't take long, since the thread is apparently already in the
2600 // middle of the cleanup.
2601 acquire_sem(exitSem);
2602 status = B_OK;
2603 }
2604 }
2605
2606 if (status == B_OK && _returnCode != NULL)
2607 *_returnCode = death.status;
2608
2609 return status;
2610 }
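
// Usage sketch (illustrative only): waiting on a thread with a one second
// relative timeout; "workerThread" is a hypothetical ID obtained from a prior
// spawn.
//
//	status_t returnCode;
//	status_t error = wait_for_thread_etc(workerThread, B_RELATIVE_TIMEOUT,
//		1000000, &returnCode);
//	if (error == B_OK) {
//		// the thread has exited; returnCode holds its exit status
//	} else if (error == B_TIMED_OUT) {
//		// the thread is still running
//	}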
2611
2612
2613 status_t
select_thread(int32 id, struct select_info* info, bool kernel)
2615 {
2616 // get and lock the thread
2617 Thread* thread = Thread::GetAndLock(id);
2618 if (thread == NULL)
2619 return B_BAD_THREAD_ID;
2620 BReference<Thread> threadReference(thread, true);
2621 ThreadLocker threadLocker(thread, true);
2622
2623 // We support only B_EVENT_INVALID at the moment.
2624 info->selected_events &= B_EVENT_INVALID;
2625
2626 // add info to list
2627 if (info->selected_events != 0) {
2628 info->next = thread->select_infos;
2629 thread->select_infos = info;
2630
2631 // we need a sync reference
2632 acquire_select_sync(info->sync);
2633 }
2634
2635 return B_OK;
2636 }
2637
2638
2639 status_t
deselect_thread(int32 id, struct select_info* info, bool kernel)
2641 {
2642 // get and lock the thread
2643 Thread* thread = Thread::GetAndLock(id);
2644 if (thread == NULL)
2645 return B_BAD_THREAD_ID;
2646 BReference<Thread> threadReference(thread, true);
2647 ThreadLocker threadLocker(thread, true);
2648
2649 // remove info from list
2650 select_info** infoLocation = &thread->select_infos;
2651 while (*infoLocation != NULL && *infoLocation != info)
2652 infoLocation = &(*infoLocation)->next;
2653
2654 if (*infoLocation != info)
2655 return B_OK;
2656
2657 *infoLocation = info->next;
2658
2659 threadLocker.Unlock();
2660
2661 // surrender sync reference
2662 put_select_sync(info->sync);
2663
2664 return B_OK;
2665 }
2666
2667
2668 int32
thread_max_threads(void)
2670 {
2671 return sMaxThreads;
2672 }
2673
2674
2675 int32
thread_used_threads(void)
2677 {
2678 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
2679 return sUsedThreads;
2680 }
2681
2682
2683 /*! Returns a user-readable string for a thread state.
2684 Only for use in the kernel debugger.
2685 */
2686 const char*
thread_state_to_text(Thread* thread, int32 state)
2688 {
2689 return state_to_text(thread, state);
2690 }
2691
2692
2693 int32
thread_get_io_priority(thread_id id)
2695 {
2696 Thread* thread = Thread::GetAndLock(id);
2697 if (thread == NULL)
2698 return B_BAD_THREAD_ID;
2699 BReference<Thread> threadReference(thread, true);
2700 ThreadLocker threadLocker(thread, true);
2701
2702 int32 priority = thread->io_priority;
2703 if (priority < 0) {
2704 // negative I/O priority means using the (CPU) priority
2705 priority = thread->priority;
2706 }
2707
2708 return priority;
2709 }
2710
2711
2712 void
thread_set_io_priority(int32 priority)
2714 {
2715 Thread* thread = thread_get_current_thread();
2716 ThreadLocker threadLocker(thread);
2717
2718 thread->io_priority = priority;
2719 }
2720
2721
2722 status_t
thread_init(kernel_args *args)
2724 {
2725 TRACE(("thread_init: entry\n"));
2726
2727 // create the thread hash table
2728 new(&sThreadHash) ThreadHashTable();
2729 if (sThreadHash.Init(128) != B_OK)
2730 panic("thread_init(): failed to init thread hash table!");
2731
2732 // create the thread structure object cache
2733 sThreadCache = create_object_cache("threads", sizeof(Thread), 64, NULL,
2734 NULL, NULL);
2735 // Note: The x86 port requires 64 byte alignment of thread structures.
2736 if (sThreadCache == NULL)
2737 panic("thread_init(): failed to allocate thread object cache!");
2738
2739 if (arch_thread_init(args) < B_OK)
2740 panic("arch_thread_init() failed!\n");
2741
2742 // skip all thread IDs including B_SYSTEM_TEAM, which is reserved
2743 sNextThreadID = B_SYSTEM_TEAM + 1;
2744
2745 // create an idle thread for each cpu
2746 for (uint32 i = 0; i < args->num_cpus; i++) {
2747 Thread *thread;
2748 area_info info;
2749 char name[64];
2750
2751 sprintf(name, "idle thread %" B_PRIu32, i + 1);
2752 thread = new(&sIdleThreads[i]) Thread(name,
2753 i == 0 ? team_get_kernel_team_id() : -1, &gCPU[i]);
2754 if (thread == NULL || thread->Init(true) != B_OK) {
2755 panic("error creating idle thread struct\n");
2756 return B_NO_MEMORY;
2757 }
2758
2759 gCPU[i].running_thread = thread;
2760
2761 thread->team = team_get_kernel_team();
2762 thread->priority = B_IDLE_PRIORITY;
2763 thread->state = B_THREAD_RUNNING;
2764
2765 sprintf(name, "idle thread %" B_PRIu32 " kstack", i + 1);
2766 thread->kernel_stack_area = find_area(name);
2767
2768 if (get_area_info(thread->kernel_stack_area, &info) != B_OK)
2769 panic("error finding idle kstack area\n");
2770
2771 thread->kernel_stack_base = (addr_t)info.address;
2772 thread->kernel_stack_top = thread->kernel_stack_base + info.size;
2773
2774 thread->visible = true;
2775 insert_thread_into_team(thread->team, thread);
2776
2777 scheduler_on_thread_init(thread);
2778 }
2779 sUsedThreads = args->num_cpus;
2780
2781 // init the notification service
2782 new(&sNotificationService) ThreadNotificationService();
2783
2784 sNotificationService.Register();
2785
2786 // start the undertaker thread
2787 new(&sUndertakerEntries) DoublyLinkedList<UndertakerEntry>();
2788 sUndertakerCondition.Init(&sUndertakerEntries, "undertaker entries");
2789
2790 thread_id undertakerThread = spawn_kernel_thread(&undertaker, "undertaker",
2791 B_DISPLAY_PRIORITY, NULL);
2792 if (undertakerThread < 0)
2793 panic("Failed to create undertaker thread!");
2794 resume_thread(undertakerThread);
2795
2796 // set up some debugger commands
2797 add_debugger_command_etc("threads", &dump_thread_list, "List all threads",
2798 "[ <team> ]\n"
2799 "Prints a list of all existing threads, or, if a team ID is given,\n"
2800 "all threads of the specified team.\n"
2801 " <team> - The ID of the team whose threads shall be listed.\n", 0);
2802 add_debugger_command_etc("ready", &dump_thread_list,
2803 "List all ready threads",
2804 "\n"
2805 "Prints a list of all threads in ready state.\n", 0);
2806 add_debugger_command_etc("running", &dump_thread_list,
2807 "List all running threads",
2808 "\n"
2809 "Prints a list of all threads in running state.\n", 0);
2810 add_debugger_command_etc("waiting", &dump_thread_list,
2811 "List all waiting threads (optionally for a specific semaphore)",
2812 "[ <sem> ]\n"
2813 "Prints a list of all threads in waiting state. If a semaphore is\n"
2814 "specified, only the threads waiting on that semaphore are listed.\n"
2815 " <sem> - ID of the semaphore.\n", 0);
2816 add_debugger_command_etc("realtime", &dump_thread_list,
2817 "List all realtime threads",
2818 "\n"
2819 "Prints a list of all threads with realtime priority.\n", 0);
2820 add_debugger_command_etc("thread", &dump_thread_info,
2821 "Dump info about a particular thread",
2822 "[ -s ] ( <id> | <address> | <name> )*\n"
2823 "Prints information about the specified thread. If no argument is\n"
2824 "given the current thread is selected.\n"
2825 " -s - Print info in compact table form (like \"threads\").\n"
2826 " <id> - The ID of the thread.\n"
2827 " <address> - The address of the thread structure.\n"
2828 " <name> - The thread's name.\n", 0);
2829 add_debugger_command_etc("calling", &dump_thread_list,
2830 "Show all threads that have a specific address in their call chain",
2831 "{ <symbol-pattern> | <start> <end> }\n", 0);
2832 add_debugger_command_etc("unreal", &make_thread_unreal,
2833 "Set realtime priority threads to normal priority",
2834 "[ <id> ]\n"
2835 "Sets the priority of all realtime threads or, if given, the one\n"
2836 "with the specified ID to \"normal\" priority.\n"
2837 " <id> - The ID of the thread.\n", 0);
2838 add_debugger_command_etc("suspend", &make_thread_suspended,
2839 "Suspend a thread",
2840 "[ <id> ]\n"
2841 "Suspends the thread with the given ID. If no ID argument is given\n"
2842 "the current thread is selected.\n"
2843 " <id> - The ID of the thread.\n", 0);
2844 add_debugger_command_etc("resume", &make_thread_resumed, "Resume a thread",
2845 "<id>\n"
2846 "Resumes the specified thread, if it is currently suspended.\n"
2847 " <id> - The ID of the thread.\n", 0);
2848 add_debugger_command_etc("drop", &drop_into_debugger,
2849 "Drop a thread into the userland debugger",
2850 "<id>\n"
2851 "Drops the specified (userland) thread into the userland debugger\n"
2852 "after leaving the kernel debugger.\n"
2853 " <id> - The ID of the thread.\n", 0);
2854 add_debugger_command_etc("priority", &set_thread_prio,
2855 "Set a thread's priority",
2856 "<priority> [ <id> ]\n"
2857 "Sets the priority of the thread with the specified ID to the given\n"
2858 "priority. If no thread ID is given, the current thread is selected.\n"
2859 " <priority> - The thread's new priority (0 - 120)\n"
2860 " <id> - The ID of the thread.\n", 0);
2861
2862 return B_OK;
2863 }
2864
2865
2866 status_t
thread_preboot_init_percpu(struct kernel_args *args, int32 cpuNum)
2868 {
2869 // set up the cpu pointer in the not yet initialized per-cpu idle thread
2870 // so that get_current_cpu and friends will work, which is crucial for
2871 // a lot of low level routines
2872 sIdleThreads[cpuNum].cpu = &gCPU[cpuNum];
2873 arch_thread_set_current_thread(&sIdleThreads[cpuNum]);
2874 return B_OK;
2875 }
2876
2877
2878 // #pragma mark - thread blocking API
2879
2880
2881 static int32
thread_block_timeout(timer* timer)
2883 {
2884 Thread* thread = (Thread*)timer->user_data;
2885 thread_unblock(thread, B_TIMED_OUT);
2886
2887 timer->user_data = NULL;
2888 return B_HANDLED_INTERRUPT;
2889 }
2890
2891
/*! Blocks the current thread.

The thread is blocked until someone else unblocks it. Must be called after a
call to thread_prepare_to_block(). If the thread has already been unblocked
after the previous call to thread_prepare_to_block(), this function will
return immediately. Cf. the documentation of thread_prepare_to_block() for
more details.

The caller must hold the scheduler lock.

\param thread The current thread.
\return The error code passed to the unblocking function. thread_interrupt()
uses \c B_INTERRUPTED. By convention \c B_OK means that the wait was
successful while another error code indicates a failure (what that means
depends on the client code).
*/
2908 static inline status_t
thread_block_locked(Thread* thread)
2910 {
2911 if (thread->wait.status == 1) {
2912 // check for signals, if interruptible
2913 if (thread_is_interrupted(thread, thread->wait.flags)) {
2914 thread->wait.status = B_INTERRUPTED;
2915 } else
2916 scheduler_reschedule(B_THREAD_WAITING);
2917 }
2918
2919 return thread->wait.status;
2920 }
2921
2922
2923 /*! Blocks the current thread.
2924
2925 The function acquires the scheduler lock and calls thread_block_locked().
2926 See there for more information.
2927 */
2928 status_t
thread_block()
2930 {
2931 InterruptsSpinLocker _(thread_get_current_thread()->scheduler_lock);
2932 return thread_block_locked(thread_get_current_thread());
2933 }
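
// Usage sketch (illustrative only): the usual two-step blocking protocol built
// on thread_prepare_to_block() and thread_block(). The waiter list and its lock
// are hypothetical; preparing to block before publishing ourselves ensures a
// racing thread_unblock() is not lost.
//
//	Thread* thread = thread_get_current_thread();
//	thread_prepare_to_block(thread, B_CAN_INTERRUPT, THREAD_BLOCK_TYPE_OTHER,
//		"example wait");
//	// add the thread to the waiter list under the list's lock, then drop the
//	// lock and block
//	status_t error = thread_block();
//	// B_OK on a regular wake-up, B_INTERRUPTED if a signal interrupted the wait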
2934
2935
/*! Blocks the current thread with a timeout.

The current thread is blocked until someone else unblocks it or the specified
timeout occurs. Must be called after a call to thread_prepare_to_block(). If
the thread has already been unblocked after the previous call to
thread_prepare_to_block(), this function will return immediately. See
thread_prepare_to_block() for more details.

The caller must not hold the scheduler lock.

\param timeoutFlags The standard timeout flags:
- \c B_RELATIVE_TIMEOUT: \a timeout specifies the time to wait.
- \c B_ABSOLUTE_TIMEOUT: \a timeout specifies the absolute end time when
the timeout shall occur.
- \c B_TIMEOUT_REAL_TIME_BASE: Only relevant when \c B_ABSOLUTE_TIMEOUT
is specified, too. Specifies that \a timeout is a real time, not a
system time.
If neither \c B_RELATIVE_TIMEOUT nor \c B_ABSOLUTE_TIMEOUT is
specified, an infinite timeout is implied and the function behaves like
thread_block_locked().
\return The error code passed to the unblocking function. thread_interrupt()
uses \c B_INTERRUPTED. When the timeout occurred, \c B_TIMED_OUT is
returned. By convention \c B_OK means that the wait was successful while
another error code indicates a failure (what that means depends on the
client code).
*/
2962 status_t
thread_block_with_timeout(uint32 timeoutFlags, bigtime_t timeout)
2964 {
2965 Thread* thread = thread_get_current_thread();
2966
2967 InterruptsSpinLocker locker(thread->scheduler_lock);
2968
2969 if (thread->wait.status != 1)
2970 return thread->wait.status;
2971
2972 bool useTimer = (timeoutFlags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0
2973 && timeout != B_INFINITE_TIMEOUT;
2974
2975 if (useTimer) {
2976 // Timer flags: absolute/relative.
2977 uint32 timerFlags;
2978 if ((timeoutFlags & B_RELATIVE_TIMEOUT) != 0) {
2979 timerFlags = B_ONE_SHOT_RELATIVE_TIMER;
2980 } else {
2981 timerFlags = B_ONE_SHOT_ABSOLUTE_TIMER;
2982 if ((timeoutFlags & B_TIMEOUT_REAL_TIME_BASE) != 0)
2983 timerFlags |= B_TIMER_REAL_TIME_BASE;
2984 }
2985
2986 // install the timer
2987 thread->wait.unblock_timer.user_data = thread;
2988 add_timer(&thread->wait.unblock_timer, &thread_block_timeout, timeout,
2989 timerFlags);
2990 }
2991
2992 status_t error = thread_block_locked(thread);
2993
2994 locker.Unlock();
2995
2996 // cancel timer, if it didn't fire
2997 if (useTimer && thread->wait.unblock_timer.user_data != NULL)
2998 cancel_timer(&thread->wait.unblock_timer);
2999
3000 return error;
3001 }
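
// Usage sketch (illustrative only): the same protocol as above, but bounded by
// a relative timeout of half a second instead of waiting indefinitely.
//
//	thread_prepare_to_block(thread_get_current_thread(), B_CAN_INTERRUPT,
//		THREAD_BLOCK_TYPE_OTHER, "example wait");
//	status_t error = thread_block_with_timeout(B_RELATIVE_TIMEOUT, 500000);
//	if (error == B_TIMED_OUT) {
//		// the unblock timer fired before anyone called thread_unblock()
//	}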
3002
3003
3004 /*! Unblocks a thread.
3005
3006 Acquires the scheduler lock and calls thread_unblock_locked().
3007 See there for more information.
3008 */
3009 void
thread_unblock(Thread* thread, status_t status)
3011 {
3012 InterruptsSpinLocker locker(thread->scheduler_lock);
3013 thread_unblock_locked(thread, status);
3014 }
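
// Usage sketch (illustrative only): the waking side of the protocol shown at
// thread_block(). "waitingThread" is a hypothetical Thread* taken from a waiter
// list; the status passed here becomes the waiter's thread_block() return value.
//
//	thread_unblock(waitingThread, B_OK);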
3015
3016
3017 /*! Unblocks a userland-blocked thread.
3018 The caller must not hold any locks.
3019 */
3020 static status_t
user_unblock_thread(thread_id threadID, status_t status)
3022 {
3023 // get the thread
3024 Thread* thread = Thread::GetAndLock(threadID);
3025 if (thread == NULL)
3026 return B_BAD_THREAD_ID;
3027 BReference<Thread> threadReference(thread, true);
3028 ThreadLocker threadLocker(thread, true);
3029
3030 if (thread->user_thread == NULL)
3031 return B_NOT_ALLOWED;
3032
3033 InterruptsSpinLocker locker(thread->scheduler_lock);
3034
3035 status_t waitStatus;
3036 if (user_memcpy(&waitStatus, &thread->user_thread->wait_status,
3037 sizeof(waitStatus)) < B_OK) {
3038 return B_BAD_ADDRESS;
3039 }
3040 if (waitStatus > 0) {
3041 if (user_memcpy(&thread->user_thread->wait_status, &status,
3042 sizeof(status)) < B_OK) {
3043 return B_BAD_ADDRESS;
3044 }
3045
3046 // Even if the user_thread->wait_status was > 0, it may be the
3047 // case that this thread is actually blocked on something else.
3048 if (thread->wait.status > 0
3049 && thread->wait.type == THREAD_BLOCK_TYPE_USER) {
3050 thread_unblock_locked(thread, status);
3051 }
3052 }
3053 return B_OK;
3054 }
3055
3056
3057 static bool
thread_check_permissions(const Thread* currentThread, const Thread* thread,
3059 bool kernel)
3060 {
3061 if (kernel)
3062 return true;
3063
3064 if (thread->team->id == team_get_kernel_team_id())
3065 return false;
3066
3067 if (thread->team == currentThread->team
3068 || currentThread->team->effective_uid == 0
3069 || thread->team->real_uid == currentThread->team->real_uid)
3070 return true;
3071
3072 return false;
3073 }
3074
3075
3076 static status_t
thread_send_signal(thread_id id, uint32 number, int32 signalCode,
3078 int32 errorCode, bool kernel)
3079 {
3080 if (id <= 0)
3081 return B_BAD_VALUE;
3082
3083 Thread* currentThread = thread_get_current_thread();
3084 Thread* thread = Thread::Get(id);
3085 if (thread == NULL)
3086 return B_BAD_THREAD_ID;
3087 BReference<Thread> threadReference(thread, true);
3088
3089 // check whether sending the signal is allowed
3090 if (!thread_check_permissions(currentThread, thread, kernel))
3091 return B_NOT_ALLOWED;
3092
3093 Signal signal(number, signalCode, errorCode, currentThread->team->id);
3094 return send_signal_to_thread(thread, signal, 0);
3095 }
3096
3097
3098 // #pragma mark - public kernel API
3099
3100
3101 void
exit_thread(status_t returnValue)
3103 {
3104 Thread *thread = thread_get_current_thread();
3105 Team* team = thread->team;
3106
3107 thread->exit.status = returnValue;
3108
3109 // if called from a kernel thread, we don't deliver the signal,
3110 // we just exit directly to keep the user space behaviour of
3111 // this function
3112 if (team != team_get_kernel_team()) {
3113 // If this is its main thread, set the team's exit status.
3114 if (thread == team->main_thread) {
3115 TeamLocker teamLocker(team);
3116
3117 if (!team->exit.initialized) {
3118 team->exit.reason = CLD_EXITED;
3119 team->exit.signal = 0;
3120 team->exit.signaling_user = 0;
3121 team->exit.status = returnValue;
3122 team->exit.initialized = true;
3123 }
3124
3125 teamLocker.Unlock();
3126 }
3127
3128 Signal signal(SIGKILLTHR, SI_USER, B_OK, team->id);
3129 send_signal_to_thread(thread, signal, B_DO_NOT_RESCHEDULE);
3130 } else
3131 thread_exit();
3132 }
3133
3134
3135 static status_t
thread_kill_thread(thread_id id, bool kernel)
3137 {
3138 return thread_send_signal(id, SIGKILLTHR, SI_USER, B_OK, kernel);
3139 }
3140
3141
3142 status_t
kill_thread(thread_id id)
3144 {
3145 return thread_kill_thread(id, true);
3146 }
3147
3148
3149 status_t
send_data(thread_id thread, int32 code, const void *buffer, size_t bufferSize)
3151 {
3152 return send_data_etc(thread, code, buffer, bufferSize, 0);
3153 }
3154
3155
3156 int32
receive_data(thread_id *sender, void *buffer, size_t bufferSize)
3158 {
3159 return receive_data_etc(sender, buffer, bufferSize, 0);
3160 }
3161
3162
3163 static bool
thread_has_data(thread_id id, bool kernel)
3165 {
3166 Thread* currentThread = thread_get_current_thread();
3167 Thread* thread;
3168 BReference<Thread> threadReference;
3169 if (id == currentThread->id) {
3170 thread = currentThread;
3171 } else {
3172 thread = Thread::Get(id);
3173 if (thread == NULL)
3174 return false;
3175
3176 threadReference.SetTo(thread, true);
3177 }
3178
3179 if (!kernel && thread->team != currentThread->team)
3180 return false;
3181
3182 int32 count;
3183 if (get_sem_count(thread->msg.read_sem, &count) != B_OK)
3184 return false;
3185
3186 return count == 0 ? false : true;
3187 }
3188
3189
3190 bool
has_data(thread_id thread)
3192 {
3193 return thread_has_data(thread, true);
3194 }
3195
3196
3197 status_t
_get_thread_info(thread_id id, thread_info *info, size_t size)
3199 {
3200 if (info == NULL || size != sizeof(thread_info) || id < B_OK)
3201 return B_BAD_VALUE;
3202
3203 // get the thread
3204 Thread* thread = Thread::GetAndLock(id);
3205 if (thread == NULL)
3206 return B_BAD_THREAD_ID;
3207 BReference<Thread> threadReference(thread, true);
3208 ThreadLocker threadLocker(thread, true);
3209
3210 // fill the info -- also requires the scheduler lock to be held
3211 InterruptsSpinLocker locker(thread->scheduler_lock);
3212
3213 fill_thread_info(thread, info, size);
3214
3215 return B_OK;
3216 }
3217
3218
3219 status_t
_get_next_thread_info(team_id teamID, int32 *_cookie, thread_info *info,
3221 size_t size)
3222 {
3223 if (info == NULL || size != sizeof(thread_info) || teamID < 0)
3224 return B_BAD_VALUE;
3225
3226 int32 lastID = *_cookie;
3227
3228 // get the team
3229 Team* team = Team::GetAndLock(teamID);
3230 if (team == NULL)
3231 return B_BAD_VALUE;
3232 BReference<Team> teamReference(team, true);
3233 TeamLocker teamLocker(team, true);
3234
3235 Thread* thread = NULL;
3236
3237 if (lastID == 0) {
3238 // We start with the main thread
3239 thread = team->main_thread;
3240 } else {
3241 // Find the one thread with an ID greater than ours (as long as the IDs
3242 // don't wrap they are always sorted from highest to lowest).
3243 // TODO: That is broken not only when the IDs wrap, but also for the
3244 // kernel team, to which threads are added when they are dying.
3245 for (Thread* next = team->thread_list; next != NULL;
3246 next = next->team_next) {
3247 if (next->id <= lastID)
3248 break;
3249
3250 thread = next;
3251 }
3252 }
3253
3254 if (thread == NULL)
3255 return B_BAD_VALUE;
3256
3257 lastID = thread->id;
3258 *_cookie = lastID;
3259
3260 ThreadLocker threadLocker(thread);
3261 InterruptsSpinLocker locker(thread->scheduler_lock);
3262
3263 fill_thread_info(thread, info, size);
3264
3265 return B_OK;
3266 }
3267
3268
3269 thread_id
find_thread(const char* name)
3271 {
3272 if (name == NULL)
3273 return thread_get_current_thread_id();
3274
3275 InterruptsReadSpinLocker threadHashLocker(sThreadHashLock);
3276
3277 // Scanning the whole hash with the thread hash lock held isn't exactly
3278 // cheap, but since this function is probably used very rarely, and we
3279 // only need a read lock, it's probably acceptable.
3280
3281 for (ThreadHashTable::Iterator it = sThreadHash.GetIterator();
3282 Thread* thread = it.Next();) {
3283 if (!thread->visible)
3284 continue;
3285
3286 if (strcmp(thread->name, name) == 0)
3287 return thread->id;
3288 }
3289
3290 return B_NAME_NOT_FOUND;
3291 }
3292
3293
3294 status_t
rename_thread(thread_id id, const char* name)
3296 {
3297 if (name == NULL)
3298 return B_BAD_VALUE;
3299
3300 // get the thread
3301 Thread* thread = Thread::GetAndLock(id);
3302 if (thread == NULL)
3303 return B_BAD_THREAD_ID;
3304 BReference<Thread> threadReference(thread, true);
3305 ThreadLocker threadLocker(thread, true);
3306
3307 // check whether the operation is allowed
3308 if (thread->team != thread_get_current_thread()->team)
3309 return B_NOT_ALLOWED;
3310
3311 strlcpy(thread->name, name, B_OS_NAME_LENGTH);
3312
3313 team_id teamID = thread->team->id;
3314
3315 threadLocker.Unlock();
3316
3317 // notify listeners
3318 sNotificationService.Notify(THREAD_NAME_CHANGED, teamID, id);
3319 // don't pass the thread structure, as it's unsafe, if it isn't ours
3320
3321 return B_OK;
3322 }
3323
3324
3325 static status_t
thread_set_thread_priority(thread_id id, int32 priority, bool kernel)
3327 {
3328 // make sure the passed in priority is within bounds
3329 if (priority > THREAD_MAX_SET_PRIORITY)
3330 priority = THREAD_MAX_SET_PRIORITY;
3331 if (priority < THREAD_MIN_SET_PRIORITY)
3332 priority = THREAD_MIN_SET_PRIORITY;
3333
3334 // get the thread
3335 Thread* thread = Thread::GetAndLock(id);
3336 if (thread == NULL)
3337 return B_BAD_THREAD_ID;
3338 BReference<Thread> threadReference(thread, true);
3339 ThreadLocker threadLocker(thread, true);
3340
3341 // check whether the change is allowed
3342 if (thread_is_idle_thread(thread) || !thread_check_permissions(
3343 thread_get_current_thread(), thread, kernel))
3344 return B_NOT_ALLOWED;
3345
3346 return scheduler_set_thread_priority(thread, priority);
3347 }
3348
3349
3350 status_t
set_thread_priority(thread_id id, int32 priority)
3352 {
3353 return thread_set_thread_priority(id, priority, true);
3354 }
3355
3356
3357 status_t
snooze_etc(bigtime_t timeout, int timebase, uint32 flags)
3359 {
3360 return common_snooze_etc(timeout, timebase, flags, NULL);
3361 }
3362
3363
3364 /*! snooze() for internal kernel use only; doesn't interrupt on signals. */
3365 status_t
snooze(bigtime_t timeout)
3367 {
3368 return snooze_etc(timeout, B_SYSTEM_TIMEBASE, B_RELATIVE_TIMEOUT);
3369 }
3370
3371
3372 /*! snooze_until() for internal kernel use only; doesn't interrupt on
3373 signals.
3374 */
3375 status_t
snooze_until(bigtime_t timeout, int timebase)
3377 {
3378 return snooze_etc(timeout, timebase, B_ABSOLUTE_TIMEOUT);
3379 }
3380
3381
3382 status_t
wait_for_thread(thread_id thread, status_t *_returnCode)
3384 {
3385 return wait_for_thread_etc(thread, 0, 0, _returnCode);
3386 }
3387
3388
3389 static status_t
thread_suspend_thread(thread_id id, bool kernel)
3391 {
3392 return thread_send_signal(id, SIGSTOP, SI_USER, B_OK, kernel);
3393 }
3394
3395
3396 status_t
suspend_thread(thread_id id)
3398 {
3399 return thread_suspend_thread(id, true);
3400 }
3401
3402
3403 static status_t
thread_resume_thread(thread_id id, bool kernel)
3405 {
3406 // Using the kernel internal SIGNAL_CONTINUE_THREAD signal retains
3407 // compatibility to BeOS which documents the combination of suspend_thread()
3408 // and resume_thread() to interrupt threads waiting on semaphores.
3409 return thread_send_signal(id, SIGNAL_CONTINUE_THREAD, SI_USER, B_OK, kernel);
3410 }
3411
3412
3413 status_t
resume_thread(thread_id id)
3415 {
3416 return thread_resume_thread(id, true);
3417 }
3418
3419
3420 thread_id
spawn_kernel_thread(thread_func function, const char *name, int32 priority,
3422 void *arg)
3423 {
3424 return thread_create_thread(
3425 ThreadCreationAttributes(function, name, priority, arg),
3426 true);
3427 }
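
// Usage sketch (illustrative only): spawning and joining a kernel thread. The
// worker function is hypothetical; threads are created in the suspended state,
// so resume_thread() is required to start it.
//
//	static status_t
//	worker(void* data)
//	{
//		// ... do the actual work ...
//		return B_OK;
//	}
//
//	thread_id workerThread = spawn_kernel_thread(&worker, "example worker",
//		B_NORMAL_PRIORITY, NULL);
//	if (workerThread >= 0) {
//		resume_thread(workerThread);
//		status_t result;
//		wait_for_thread(workerThread, &result);
//	}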
3428
3429
3430 int
getrlimit(int resource, struct rlimit * rlp)
3432 {
3433 status_t error = common_getrlimit(resource, rlp);
3434 if (error != B_OK) {
3435 errno = error;
3436 return -1;
3437 }
3438
3439 return 0;
3440 }
3441
3442
3443 int
setrlimit(int resource, const struct rlimit * rlp)
3445 {
3446 status_t error = common_setrlimit(resource, rlp);
3447 if (error != B_OK) {
3448 errno = error;
3449 return -1;
3450 }
3451
3452 return 0;
3453 }
3454
3455
3456 // #pragma mark - syscalls
3457
3458
3459 void
_user_exit_thread(status_t returnValue)
3461 {
3462 exit_thread(returnValue);
3463 }
3464
3465
3466 status_t
_user_kill_thread(thread_id thread)
3468 {
3469 return thread_kill_thread(thread, false);
3470 }
3471
3472
3473 status_t
_user_cancel_thread(thread_id threadID, void (*cancelFunction)(int))
3475 {
3476 // check the cancel function
3477 if (cancelFunction == NULL || !IS_USER_ADDRESS(cancelFunction))
3478 return B_BAD_VALUE;
3479
3480 // get and lock the thread
3481 Thread* thread = Thread::GetAndLock(threadID);
3482 if (thread == NULL)
3483 return B_BAD_THREAD_ID;
3484 BReference<Thread> threadReference(thread, true);
3485 ThreadLocker threadLocker(thread, true);
3486
3487 // only threads of the same team can be canceled
3488 if (thread->team != thread_get_current_thread()->team)
3489 return B_NOT_ALLOWED;
3490
3491 // set the cancel function
3492 thread->cancel_function = cancelFunction;
3493
3494 // send the cancellation signal to the thread
3495 InterruptsReadSpinLocker teamLocker(thread->team_lock);
3496 SpinLocker locker(thread->team->signal_lock);
3497 return send_signal_to_thread_locked(thread, SIGNAL_CANCEL_THREAD, NULL, 0);
3498 }
3499
3500
3501 status_t
_user_resume_thread(thread_id thread)
3503 {
3504 return thread_resume_thread(thread, false);
3505 }
3506
3507
3508 status_t
_user_suspend_thread(thread_id thread)
3510 {
3511 return thread_suspend_thread(thread, false);
3512 }
3513
3514
3515 status_t
_user_rename_thread(thread_id thread, const char *userName)
3517 {
3518 char name[B_OS_NAME_LENGTH];
3519
3520 if (!IS_USER_ADDRESS(userName)
3521 || userName == NULL
3522 || user_strlcpy(name, userName, B_OS_NAME_LENGTH) < B_OK)
3523 return B_BAD_ADDRESS;
3524
3525 // rename_thread() forbids thread renames across teams, so we don't
3526 // need a "kernel" flag here.
3527 return rename_thread(thread, name);
3528 }
3529
3530
3531 int32
_user_set_thread_priority(thread_id thread, int32 newPriority)
3533 {
3534 return thread_set_thread_priority(thread, newPriority, false);
3535 }
3536
3537
3538 thread_id
_user_spawn_thread(thread_creation_attributes* userAttributes)
3540 {
3541 // copy the userland structure to the kernel
3542 char nameBuffer[B_OS_NAME_LENGTH];
3543 ThreadCreationAttributes attributes;
3544 status_t error = attributes.InitFromUserAttributes(userAttributes,
3545 nameBuffer);
3546 if (error != B_OK)
3547 return error;
3548
3549 // create the thread
3550 thread_id threadID = thread_create_thread(attributes, false);
3551
3552 if (threadID >= 0)
3553 user_debug_thread_created(threadID);
3554
3555 return threadID;
3556 }
3557
3558
3559 status_t
_user_snooze_etc(bigtime_t timeout, int timebase, uint32 flags,
3561 bigtime_t* userRemainingTime)
3562 {
3563 // We need to store more syscall restart parameters than usual and need a
3564 // somewhat different handling. Hence we can't use
3565 // syscall_restart_handle_timeout_pre() but do the job ourselves.
3566 struct restart_parameters {
3567 bigtime_t timeout;
3568 clockid_t timebase;
3569 uint32 flags;
3570 };
3571
3572 Thread* thread = thread_get_current_thread();
3573
3574 if ((thread->flags & THREAD_FLAGS_SYSCALL_RESTARTED) != 0) {
3575 // The syscall was restarted. Fetch the parameters from the stored
3576 // restart parameters.
3577 restart_parameters* restartParameters
3578 = (restart_parameters*)thread->syscall_restart.parameters;
3579 timeout = restartParameters->timeout;
3580 timebase = restartParameters->timebase;
3581 flags = restartParameters->flags;
3582 } else {
3583 // convert relative timeouts to absolute ones
3584 if ((flags & B_RELATIVE_TIMEOUT) != 0) {
3585 // not restarted yet and the flags indicate a relative timeout
3586
3587 // Make sure we use the system time base, so real-time clock changes
3588 // won't affect our wait.
3589 flags &= ~(uint32)B_TIMEOUT_REAL_TIME_BASE;
3590 if (timebase == CLOCK_REALTIME)
3591 timebase = CLOCK_MONOTONIC;
3592
3593 // get the current time and make the timeout absolute
3594 bigtime_t now;
3595 status_t error = user_timer_get_clock(timebase, now);
3596 if (error != B_OK)
3597 return error;
3598
3599 timeout += now;
3600
3601 // deal with overflow
3602 if (timeout < 0)
3603 timeout = B_INFINITE_TIMEOUT;
3604
3605 flags = (flags & ~B_RELATIVE_TIMEOUT) | B_ABSOLUTE_TIMEOUT;
3606 } else
3607 flags |= B_ABSOLUTE_TIMEOUT;
3608 }
3609
3610 // snooze
3611 bigtime_t remainingTime;
3612 status_t error = common_snooze_etc(timeout, timebase,
3613 flags | B_CAN_INTERRUPT | B_CHECK_PERMISSION,
3614 userRemainingTime != NULL ? &remainingTime : NULL);
3615
3616 // If interrupted, copy the remaining time back to userland and prepare the
3617 // syscall restart.
3618 if (error == B_INTERRUPTED) {
3619 if (userRemainingTime != NULL
3620 && (!IS_USER_ADDRESS(userRemainingTime)
3621 || user_memcpy(userRemainingTime, &remainingTime,
3622 sizeof(remainingTime)) != B_OK)) {
3623 return B_BAD_ADDRESS;
3624 }
3625
3626 // store the normalized values in the restart parameters
3627 restart_parameters* restartParameters
3628 = (restart_parameters*)thread->syscall_restart.parameters;
3629 restartParameters->timeout = timeout;
3630 restartParameters->timebase = timebase;
3631 restartParameters->flags = flags;
3632
3633 // restart the syscall, if possible
3634 atomic_or(&thread->flags, THREAD_FLAGS_RESTART_SYSCALL);
3635 }
3636
3637 return error;
3638 }
3639
3640
3641 void
_user_thread_yield(void)
3643 {
3644 thread_yield();
3645 }
3646
3647
3648 status_t
_user_get_thread_info(thread_id id, thread_info *userInfo)
3650 {
3651 thread_info info;
3652 status_t status;
3653
3654 if (!IS_USER_ADDRESS(userInfo))
3655 return B_BAD_ADDRESS;
3656
3657 status = _get_thread_info(id, &info, sizeof(thread_info));
3658
3659 if (status >= B_OK
3660 && user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3661 return B_BAD_ADDRESS;
3662
3663 return status;
3664 }
3665
3666
3667 status_t
_user_get_next_thread_info(team_id team, int32 *userCookie,
3669 thread_info *userInfo)
3670 {
3671 status_t status;
3672 thread_info info;
3673 int32 cookie;
3674
3675 if (!IS_USER_ADDRESS(userCookie) || !IS_USER_ADDRESS(userInfo)
3676 || user_memcpy(&cookie, userCookie, sizeof(int32)) < B_OK)
3677 return B_BAD_ADDRESS;
3678
3679 status = _get_next_thread_info(team, &cookie, &info, sizeof(thread_info));
3680 if (status < B_OK)
3681 return status;
3682
3683 if (user_memcpy(userCookie, &cookie, sizeof(int32)) < B_OK
3684 || user_memcpy(userInfo, &info, sizeof(thread_info)) < B_OK)
3685 return B_BAD_ADDRESS;
3686
3687 return status;
3688 }
3689
3690
3691 thread_id
_user_find_thread(const char *userName)
3693 {
3694 char name[B_OS_NAME_LENGTH];
3695
3696 if (userName == NULL)
3697 return find_thread(NULL);
3698
3699 if (!IS_USER_ADDRESS(userName)
3700 || user_strlcpy(name, userName, sizeof(name)) < B_OK)
3701 return B_BAD_ADDRESS;
3702
3703 return find_thread(name);
3704 }
3705
3706
3707 status_t
_user_wait_for_thread(thread_id id, status_t *userReturnCode)
3709 {
3710 status_t returnCode;
3711 status_t status;
3712
3713 if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
3714 return B_BAD_ADDRESS;
3715
3716 status = wait_for_thread_etc(id, B_CAN_INTERRUPT, 0, &returnCode);
3717
3718 if (status == B_OK && userReturnCode != NULL
3719 && user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
3720 return B_BAD_ADDRESS;
3721 }
3722
3723 return syscall_restart_handle_post(status);
3724 }
3725
3726
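/*! Syscall: like _user_wait_for_thread(), but with explicit timeout semantics.
    The pre/post restart helpers normalize a relative timeout so the wait can
    be resumed correctly if the syscall is restarted.
*/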
3727 status_t
3728 _user_wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout, status_t *userReturnCode)
3729 {
3730 status_t returnCode;
3731 status_t status;
3732
3733 if (userReturnCode != NULL && !IS_USER_ADDRESS(userReturnCode))
3734 return B_BAD_ADDRESS;
3735
3736 syscall_restart_handle_timeout_pre(flags, timeout);
3737
3738 status = wait_for_thread_etc(id, flags | B_CAN_INTERRUPT, timeout, &returnCode);
3739
3740 if (status == B_OK && userReturnCode != NULL
3741 && user_memcpy(userReturnCode, &returnCode, sizeof(status_t)) < B_OK) {
3742 return B_BAD_ADDRESS;
3743 }
3744
3745 return syscall_restart_handle_timeout_post(status, timeout);
3746 }
3747
3748
3749 bool
3750 _user_has_data(thread_id thread)
3751 {
3752 return thread_has_data(thread, false);
3753 }
3754
3755
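/*! Syscall: sends a data message to the target thread. send_data_etc() copies
    directly from the userland buffer (hence only the address check here), and
    B_KILL_CAN_INTERRUPT means a blocked send can only be interrupted by a
    kill signal.
*/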
3756 status_t
3757 _user_send_data(thread_id thread, int32 code, const void *buffer,
3758 size_t bufferSize)
3759 {
3760 if (buffer != NULL && !IS_USER_ADDRESS(buffer))
3761 return B_BAD_ADDRESS;
3762
3763 return send_data_etc(thread, code, buffer, bufferSize,
3764 B_KILL_CAN_INTERRUPT);
3765 // supports userland buffers
3766 }
3767
3768
3769 status_t
3770 _user_receive_data(thread_id *_userSender, void *buffer, size_t bufferSize)
3771 {
3772 thread_id sender;
3773 status_t code;
3774
3775 if ((!IS_USER_ADDRESS(_userSender) && _userSender != NULL)
3776 || (!IS_USER_ADDRESS(buffer) && buffer != NULL)) {
3777 return B_BAD_ADDRESS;
3778 }
3779
3780 code = receive_data_etc(&sender, buffer, bufferSize, B_KILL_CAN_INTERRUPT);
3781 // supports userland buffers
3782
3783 if (_userSender != NULL)
3784 if (user_memcpy(_userSender, &sender, sizeof(thread_id)) < B_OK)
3785 return B_BAD_ADDRESS;
3786
3787 return code;
3788 }
3789
3790
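/*! Syscall: the generic userland blocking primitive. The calling thread's
    user_thread->wait_status is the handshake variable: a value > 0 means
    "still waiting", a value <= 0 is the wake-up status stored by
    _user_unblock_thread(). It is re-checked with the thread lock held both
    before and after blocking, so a wake-up racing with a timeout or an
    interruption is not lost.
*/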
3791 status_t
3792 _user_block_thread(uint32 flags, bigtime_t timeout)
3793 {
3794 syscall_restart_handle_timeout_pre(flags, timeout);
3795 flags |= B_CAN_INTERRUPT;
3796
3797 Thread* thread = thread_get_current_thread();
3798 ThreadLocker threadLocker(thread);
3799
3800 // check whether we have already been unblocked
3801 status_t waitStatus;
3802 if (user_memcpy(&waitStatus, &thread->user_thread->wait_status,
3803 sizeof(waitStatus)) < B_OK) {
3804 return B_BAD_ADDRESS;
3805 }
3806 if (waitStatus <= 0)
3807 return waitStatus;
3808
3809 // nope, so wait
3810 // Note: GCC 13 marks the following call as potentially overflowing, since it thinks `thread`
3811 // may be `nullptr`. This cannot be the case in reality, therefore ignore this specific
3812 // error.
3813 #pragma GCC diagnostic push
3814 #pragma GCC diagnostic ignored "-Wstringop-overflow"
3815 thread_prepare_to_block(thread, flags, THREAD_BLOCK_TYPE_USER, NULL);
3816 #pragma GCC diagnostic pop
3817
3818 threadLocker.Unlock();
3819
3820 status_t status = thread_block_with_timeout(flags, timeout);
3821
3822 threadLocker.Lock();
3823
3824 // Interruptions or timeouts can race with other threads unblocking us.
3825 // Favor a wake-up by another thread, i.e. if someone changed the wait
3826 // status, use that.
3827 status_t oldStatus;
3828 if (user_memcpy(&oldStatus, &thread->user_thread->wait_status,
3829 sizeof(oldStatus)) < B_OK) {
3830 return B_BAD_ADDRESS;
3831 }
3832 if (oldStatus > 0) {
3833 if (user_memcpy(&thread->user_thread->wait_status, &status,
3834 sizeof(status)) < B_OK) {
3835 return B_BAD_ADDRESS;
3836 }
3837 } else {
3838 status = oldStatus;
3839 }
3840
3841 threadLocker.Unlock();
3842
3843 return syscall_restart_handle_timeout_post(status, timeout);
3844 }
3845
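// A minimal sketch of the intended userland pairing, assuming the generated
// syscall stubs _kern_block_thread()/_kern_unblock_thread() (illustrative
// only; the real callers live in libroot):
//
//   waiter:
//     user_thread->wait_status = 1;        // announce "about to block"
//     if (!condition)
//       status = _kern_block_thread(0, 0); // returns the unblock status
//
//   waker:
//     condition = true;
//     _kern_unblock_thread(waiterThread, B_OK);
//
// The wait_status handshake lets _user_block_thread() distinguish a genuine
// wake-up from a timeout or interruption.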
3846
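/*! Syscall: wakes a thread blocked in _user_block_thread() by storing "status"
    in its wait_status (done by user_unblock_thread()), then reschedules in
    case the woken thread should run immediately.
*/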
3847 status_t
3848 _user_unblock_thread(thread_id threadID, status_t status)
3849 {
3850 status_t error = user_unblock_thread(threadID, status);
3851
3852 if (error == B_OK)
3853 scheduler_reschedule_if_necessary();
3854
3855 return error;
3856 }
3857
3858
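/*! Syscall: batched variant of _user_unblock_thread(). The list is limited to
    MAX_USER_THREADS_TO_UNBLOCK entries so it can be copied onto the kernel
    stack in one go; rescheduling happens only once, after all threads have
    been unblocked.
*/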
3859 status_t
3860 _user_unblock_threads(thread_id* userThreads, uint32 count, status_t status)
3861 {
3862 enum {
3863 MAX_USER_THREADS_TO_UNBLOCK = 128
3864 };
3865
3866 if (userThreads == NULL || !IS_USER_ADDRESS(userThreads))
3867 return B_BAD_ADDRESS;
3868 if (count > MAX_USER_THREADS_TO_UNBLOCK)
3869 return B_BAD_VALUE;
3870
3871 thread_id threads[MAX_USER_THREADS_TO_UNBLOCK];
3872 if (user_memcpy(threads, userThreads, count * sizeof(thread_id)) != B_OK)
3873 return B_BAD_ADDRESS;
3874
3875 for (uint32 i = 0; i < count; i++)
3876 user_unblock_thread(threads[i], status);
3877
3878 scheduler_reschedule_if_necessary();
3879
3880 return B_OK;
3881 }
3882
3883
3884 // TODO: the following two functions don't belong here
3885
3886
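/*! Syscall: getrlimit(). The limit is fetched into a kernel-side struct rlimit
    by common_getrlimit() and only copied out to userland on success.
*/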
3887 int
3888 _user_getrlimit(int resource, struct rlimit *urlp)
3889 {
3890 struct rlimit rl;
3891 int ret;
3892
3893 if (urlp == NULL)
3894 return EINVAL;
3895
3896 if (!IS_USER_ADDRESS(urlp))
3897 return B_BAD_ADDRESS;
3898
3899 ret = common_getrlimit(resource, &rl);
3900
3901 if (ret == 0) {
3902 ret = user_memcpy(urlp, &rl, sizeof(struct rlimit));
3903 if (ret < 0)
3904 return ret;
3905
3906 return 0;
3907 }
3908
3909 return ret;
3910 }
3911
3912
3913 int
3914 _user_setrlimit(int resource, const struct rlimit *userResourceLimit)
3915 {
3916 struct rlimit resourceLimit;
3917
3918 if (userResourceLimit == NULL)
3919 return EINVAL;
3920
3921 if (!IS_USER_ADDRESS(userResourceLimit)
3922 || user_memcpy(&resourceLimit, userResourceLimit,
3923 sizeof(struct rlimit)) < B_OK)
3924 return B_BAD_ADDRESS;
3925
3926 return common_setrlimit(resource, &resourceLimit);
3927 }
3928
3929
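/*! Syscall: returns the CPU the calling thread is currently running on. The
    value is only a snapshot; the thread may already have been migrated by the
    time the syscall returns.
*/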
3930 int
3931 _user_get_cpu()
3932 {
3933 Thread* thread = thread_get_current_thread();
3934 return thread->cpu->cpu_num;
3935 }
3936
3937
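/*! Syscall: copies the CPU affinity mask of the given thread (0 means the
    calling thread) to userland, writing at most "size" bytes of the mask.
*/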
3938 status_t
3939 _user_get_thread_affinity(thread_id id, void* userMask, size_t size)
3940 {
3941 if (userMask == NULL || id < B_OK)
3942 return B_BAD_VALUE;
3943
3944 if (!IS_USER_ADDRESS(userMask))
3945 return B_BAD_ADDRESS;
3946
3947 CPUSet mask;
3948
3949 if (id == 0)
3950 id = thread_get_current_thread_id();
3951 // get the thread
3952 Thread* thread = Thread::GetAndLock(id);
3953 if (thread == NULL)
3954 return B_BAD_THREAD_ID;
3955 BReference<Thread> threadReference(thread, true);
3956 ThreadLocker threadLocker(thread, true);
3957 memcpy(&mask, &thread->cpumask, sizeof(mask));
3958
3959 if (user_memcpy(userMask, &mask, min_c(sizeof(mask), size)) < B_OK)
3960 return B_BAD_ADDRESS;
3961
3962 return B_OK;
3963 }
3964
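/*! Syscall: sets the CPU affinity mask of the given thread (0 means the
    calling thread). The mask is validated against the set of installed CPUs;
    if the thread is currently running on a CPU that has just been masked out,
    it yields so the scheduler can migrate it.
*/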
3965 status_t
3966 _user_set_thread_affinity(thread_id id, const void* userMask, size_t size)
3967 {
3968 if (userMask == NULL || id < B_OK || size < sizeof(CPUSet))
3969 return B_BAD_VALUE;
3970
3971 if (!IS_USER_ADDRESS(userMask))
3972 return B_BAD_ADDRESS;
3973
3974 CPUSet mask;
3975 if (user_memcpy(&mask, userMask, min_c(sizeof(CPUSet), size)) < B_OK)
3976 return B_BAD_ADDRESS;
3977
3978 CPUSet cpus;
3979 cpus.SetAll();
3980 for (int i = 0; i < smp_get_num_cpus(); i++)
3981 cpus.ClearBit(i);
3982 if (mask.Matches(cpus))
3983 return B_BAD_VALUE;
3984
3985 if (id == 0)
3986 id = thread_get_current_thread_id();
3987
3988 // get the thread
3989 Thread* thread = Thread::GetAndLock(id);
3990 if (thread == NULL)
3991 return B_BAD_THREAD_ID;
3992 BReference<Thread> threadReference(thread, true);
3993 ThreadLocker threadLocker(thread, true);
3994 memcpy(&thread->cpumask, &mask, sizeof(mask));
3995
3996 // if we now run on a CPU excluded from the new mask, yield so the scheduler migrates us
3997 if (!thread->cpumask.GetBit(thread->cpu->cpu_num))
3998 thread_yield();
3999
4000 return B_OK;
4001 }
4002