/*
 * Copyright 2023, Haiku, Inc. All rights reserved.
 * Copyright 2018, Jérôme Duval, jerome.duval@gmail.com.
 * Copyright 2015, Hamish Morrison, hamishm53@gmail.com.
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include <user_mutex.h>
#include <user_mutex_defs.h>

#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
#include <smp.h>
#include <syscall_restart.h>
#include <util/AutoLock.h>
#include <util/ThreadAutoLock.h>
#include <util/OpenHashTable.h>
#include <vm/vm.h>
#include <vm/VMArea.h>
#include <arch/generic/user_memory.h>


/*! One UserMutexEntry corresponds to one mutex address.
 *
 * The mutex's "waiting" state is controlled by the rw_lock: a waiter acquires
 * a "read" lock before initiating a wait, and an unblocker acquires a "write"
 * lock. That way, unblockers can be sure that no waiters will start waiting
 * during unblock, and they can thus safely (without races) unset WAITING.
 */
struct UserMutexEntry {
	generic_addr_t		address;
	UserMutexEntry*		hash_next;
	int32				ref_count;

	rw_lock				lock;
	ConditionVariable	condition;
};

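// An entry's ref_count is only ever modified atomically; the final release is
// synchronized against concurrent lookups via the table's write lock (see
// put_user_mutex_entry() below).
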
struct UserMutexHashDefinition {
	typedef generic_addr_t	KeyType;
	typedef UserMutexEntry	ValueType;
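
	// Mutex addresses are always 4-byte aligned (the syscalls below reject
	// misaligned pointers), so the two low bits of the key carry no
	// information; shift them away for a better bucket distribution.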
	size_t HashKey(generic_addr_t key) const
	{
		return key >> 2;
	}

	size_t Hash(const UserMutexEntry* value) const
	{
		return HashKey(value->address);
	}

	bool Compare(generic_addr_t key, const UserMutexEntry* value) const
	{
		return value->address == key;
	}

	UserMutexEntry*& GetLink(UserMutexEntry* value) const
	{
		return value->hash_next;
	}
};

typedef BOpenHashTable<UserMutexHashDefinition> UserMutexTable;


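// A user_mutex_context holds one entry per mutex address that has (or
// recently had) waiters. Each team gets a private context for ordinary
// mutexes, keyed by virtual address; sSharedUserMutexContext serves
// B_USER_MUTEX_SHARED mutexes, keyed by physical address (the page is wired
// while in use), so that different mappings of the same page in different
// teams resolve to the same entry.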
struct user_mutex_context {
	UserMutexTable table;
	rw_lock lock;
};
static user_mutex_context sSharedUserMutexContext;
static const char* kUserMutexEntryType = "umtx entry";


// #pragma mark - user atomics


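// These helpers perform atomic operations directly on userspace memory. For
// wired (shared) mutexes the page is pinned, so the operation can run with
// user memory access enabled; otherwise user_access() guards against faults.
// On a fault they return INT32_MIN as an in-band error value (which,
// presumably, no valid mutex word ever holds).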
static int32
user_atomic_or(int32* value, int32 orValue, bool isWired)
{
	int32 result;
	if (isWired) {
		arch_cpu_enable_user_access();
		result = atomic_or(value, orValue);
		arch_cpu_disable_user_access();
		return result;
	}

	return user_access([=, &result] {
		result = atomic_or(value, orValue);
	}) ? result : INT32_MIN;
}


static int32
user_atomic_and(int32* value, int32 andValue, bool isWired)
{
	int32 result;
	if (isWired) {
		arch_cpu_enable_user_access();
		result = atomic_and(value, andValue);
		arch_cpu_disable_user_access();
		return result;
	}

	return user_access([=, &result] {
		result = atomic_and(value, andValue);
	}) ? result : INT32_MIN;
}


static int32
user_atomic_get(int32* value, bool isWired)
{
	int32 result;
	if (isWired) {
		arch_cpu_enable_user_access();
		result = atomic_get(value);
		arch_cpu_disable_user_access();
		return result;
	}

	return user_access([=, &result] {
		result = atomic_get(value);
	}) ? result : INT32_MIN;
}


static int32
user_atomic_test_and_set(int32* value, int32 newValue, int32 testAgainst,
	bool isWired)
{
	int32 result;
	if (isWired) {
		arch_cpu_enable_user_access();
		result = atomic_test_and_set(value, newValue, testAgainst);
		arch_cpu_disable_user_access();
		return result;
	}

	return user_access([=, &result] {
		result = atomic_test_and_set(value, newValue, testAgainst);
	}) ? result : INT32_MIN;
}


// #pragma mark - user mutex context


static int
dump_user_mutex(int argc, char** argv)
{
	if (argc != 2) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	addr_t threadID = parse_expression(argv[1]);
	if (threadID == 0)
		return 0;

	Thread* thread = Thread::GetDebug(threadID);
	if (thread == NULL) {
		kprintf("no such thread\n");
		return 0;
	}

	if (thread->wait.type != THREAD_BLOCK_TYPE_CONDITION_VARIABLE) {
		kprintf("thread is not blocked on cvar (thus not user_mutex)\n");
		return 0;
	}

	ConditionVariable* variable = (ConditionVariable*)thread->wait.object;
	if (variable->ObjectType() != kUserMutexEntryType) {
		kprintf("thread is not blocked on user_mutex\n");
		return 0;
	}

	UserMutexEntry* entry = (UserMutexEntry*)variable->Object();

	const bool physical = (sSharedUserMutexContext.table.Lookup(entry->address) == entry);
	kprintf("user mutex entry %p\n", entry);
	kprintf("  address:  0x%" B_PRIxPHYSADDR " (%s)\n", entry->address,
		physical ? "physical" : "virtual");
	kprintf("  refcount: %" B_PRId32 "\n", entry->ref_count);
	kprintf("  lock:     %p\n", &entry->lock);

	int32 mutex = 0;
	status_t status = B_ERROR;
	if (!physical) {
		status = debug_memcpy(thread->team->id, &mutex,
			(void*)entry->address, sizeof(mutex));
	}

	if (status == B_OK)
		kprintf("  mutex:    0x%" B_PRIx32 "\n", mutex);

	entry->condition.Dump();

	return 0;
}


void
user_mutex_init()
{
	sSharedUserMutexContext.lock = RW_LOCK_INITIALIZER("shared user mutex table");
	if (sSharedUserMutexContext.table.Init() != B_OK)
		panic("user_mutex_init(): Failed to init table!");

	add_debugger_command_etc("user_mutex", &dump_user_mutex,
		"Dump user-mutex info",
		"<thread>\n"
		"Prints info about the user-mutex a thread is blocked on.\n"
		"  <thread>  - Thread ID that is blocked on a user mutex\n", 0);
}


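// Returns the calling team's mutex context, creating it on first use. The
// initial unlocked check is only a fast path: creation is serialized by the
// team lock, and the pointer, once set, is presumably not changed again
// before team teardown (see delete_user_mutex_context()).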
struct user_mutex_context*
get_team_user_mutex_context()
{
	struct user_mutex_context* context =
		thread_get_current_thread()->team->user_mutex_context;
	if (context != NULL)
		return context;

	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);
	if (team->user_mutex_context != NULL)
		return team->user_mutex_context;

	context = new(std::nothrow) user_mutex_context;
	if (context == NULL)
		return NULL;

	context->lock = RW_LOCK_INITIALIZER("user mutex table");
	if (context->table.Init() != B_OK) {
		delete context;
		return NULL;
	}

	team->user_mutex_context = context;
	return context;
}


void
delete_user_mutex_context(struct user_mutex_context* context)
{
	if (context == NULL)
		return;

	// This should be empty at this point in team destruction.
	ASSERT(context->table.IsEmpty());
	delete context;
}


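// Looks up the entry for the given address and takes a reference, optionally
// creating the entry if it is missing. On a miss the read lock must be traded
// for the write lock, so the lookup is repeated afterwards: another thread
// may have inserted the entry in the window between the two locks.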
static UserMutexEntry*
get_user_mutex_entry(struct user_mutex_context* context,
	generic_addr_t address, bool noInsert = false, bool alreadyLocked = false)
{
	ReadLocker tableReadLocker;
	if (!alreadyLocked)
		tableReadLocker.SetTo(context->lock, false);

	UserMutexEntry* entry = context->table.Lookup(address);
	if (entry != NULL) {
		atomic_add(&entry->ref_count, 1);
		return entry;
	} else if (noInsert)
		return entry;

	tableReadLocker.Unlock();
	WriteLocker tableWriteLocker(context->lock);

	entry = context->table.Lookup(address);
	if (entry != NULL) {
		atomic_add(&entry->ref_count, 1);
		return entry;
	}

	entry = new(std::nothrow) UserMutexEntry;
	if (entry == NULL)
		return entry;

	entry->address = address;
	entry->ref_count = 1;
	rw_lock_init(&entry->lock, "UserMutexEntry lock");
	entry->condition.Init(entry, kUserMutexEntryType);

	context->table.Insert(entry);
	return entry;
}


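// Releases a reference. Dropping the last reference does not free the entry
// immediately: under the table's write lock we re-check that the entry is
// still in the table and that no other thread re-acquired a reference after
// our decrement, and only then remove and delete it.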
static void
put_user_mutex_entry(struct user_mutex_context* context, UserMutexEntry* entry)
{
	if (entry == NULL)
		return;

	const generic_addr_t address = entry->address;
	if (atomic_add(&entry->ref_count, -1) != 1)
		return;

	WriteLocker tableWriteLocker(context->lock);

	// Was it removed & deleted while we were waiting for the lock?
	if (context->table.Lookup(address) != entry)
		return;

	// Or did someone else acquire a reference to it?
	if (atomic_get(&entry->ref_count) > 0)
		return;

	context->table.Remove(entry);
	tableWriteLocker.Unlock();

	rw_lock_destroy(&entry->lock);
	delete entry;
}


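// Waits on the entry's condition variable. The waiter is registered while the
// entry's read lock is still held (see the UserMutexEntry comment above), so
// an unblocker holding the write lock cannot miss it; only then is the lock
// released for the actual wait.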
static status_t
user_mutex_wait_locked(UserMutexEntry* entry,
	uint32 flags, bigtime_t timeout, ReadLocker& locker)
{
	ConditionVariableEntry waiter;
	entry->condition.Add(&waiter);
	locker.Unlock();

	return waiter.Wait(flags, timeout);
}


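// Atomically sets LOCKED and WAITING on the mutex. Returns true if the lock
// was thereby acquired (it was unlocked, or it is disabled and thus always
// acquirable); in that case, if WAITING was not already set, it may now have
// been set spuriously, so the read lock is upgraded to a write lock and
// WAITING is cleared again if no waiters are registered. Returns false if
// the caller must wait.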
static bool
user_mutex_prepare_to_lock(UserMutexEntry* entry, int32* mutex, bool isWired)
{
	ASSERT_READ_LOCKED_RW_LOCK(&entry->lock);

	int32 oldValue = user_atomic_or(mutex,
		B_USER_MUTEX_LOCKED | B_USER_MUTEX_WAITING, isWired);
	if ((oldValue & B_USER_MUTEX_LOCKED) == 0
			|| (oldValue & B_USER_MUTEX_DISABLED) != 0) {
		// possibly unset waiting flag
		if ((oldValue & B_USER_MUTEX_WAITING) == 0) {
			rw_lock_read_unlock(&entry->lock);
			rw_lock_write_lock(&entry->lock);
			if (entry->condition.EntriesCount() == 0)
				user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING, isWired);
			rw_lock_write_unlock(&entry->lock);
			rw_lock_read_lock(&entry->lock);
		}
		return true;
	}

	return false;
}


static status_t
user_mutex_lock_locked(UserMutexEntry* entry, int32* mutex,
	uint32 flags, bigtime_t timeout, ReadLocker& locker, bool isWired)
{
	if (user_mutex_prepare_to_lock(entry, mutex, isWired))
		return B_OK;

	status_t error = user_mutex_wait_locked(entry, flags, timeout, locker);

	// possibly unset waiting flag
	if (error != B_OK && entry->condition.EntriesCount() == 0) {
		WriteLocker writeLocker(entry->lock);
		if (entry->condition.EntriesCount() == 0)
			user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING, isWired);
	}

	return error;
}


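// Wakes waiters on the mutex. Without B_USER_MUTEX_UNBLOCK_ALL this is a
// hand-off: LOCKED is set on behalf of the thread about to be woken, so the
// mutex is never observably unlocked in between; if no waiter could actually
// be notified, LOCKED is cleared again. With B_USER_MUTEX_UNBLOCK_ALL (or if
// the mutex is disabled) all waiters are woken at once. In either case
// WAITING is cleared once no waiters remain.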
static void
user_mutex_unblock(UserMutexEntry* entry, int32* mutex, uint32 flags, bool isWired)
{
	WriteLocker entryLocker(entry->lock);
	if (entry->condition.EntriesCount() == 0) {
		// Nobody is actually waiting at present.
		user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING, isWired);
		return;
	}

	int32 oldValue = 0;
	if ((flags & B_USER_MUTEX_UNBLOCK_ALL) == 0) {
		// This is not merely an unblock, but a hand-off.
		oldValue = user_atomic_or(mutex, B_USER_MUTEX_LOCKED, isWired);
		if ((oldValue & B_USER_MUTEX_LOCKED) != 0)
			return;
	}

	if ((flags & B_USER_MUTEX_UNBLOCK_ALL) != 0
			|| (oldValue & B_USER_MUTEX_DISABLED) != 0) {
		// unblock all waiting threads
		entry->condition.NotifyAll(B_OK);
	} else {
		if (!entry->condition.NotifyOne(B_OK))
			user_atomic_and(mutex, ~(int32)B_USER_MUTEX_LOCKED, isWired);
	}

	if (entry->condition.EntriesCount() == 0)
		user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING, isWired);
}


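// User semaphores encode their state in the int32 value: positive means that
// many slots are available, zero means unavailable but uncontended, and
// negative means unavailable with waiters present. (The negative "contended"
// state is presumably what tells a userland release that it must enter the
// kernel to wake a waiter.)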
static status_t
user_mutex_sem_acquire_locked(UserMutexEntry* entry, int32* sem,
	uint32 flags, bigtime_t timeout, ReadLocker& locker, bool isWired)
{
	// The semaphore may have been released in the meantime, and we also
	// need to mark it as contended if it isn't already.
	int32 oldValue = user_atomic_get(sem, isWired);
	while (oldValue > -1) {
		int32 value = user_atomic_test_and_set(sem, oldValue - 1, oldValue, isWired);
		if (value == oldValue && value > 0)
			return B_OK;
		oldValue = value;
	}

	return user_mutex_wait_locked(entry, flags, timeout, locker);
}


static void
user_mutex_sem_release(UserMutexEntry* entry, int32* sem, bool isWired)
{
	WriteLocker entryLocker(entry->lock);
	if (entry->condition.NotifyOne(B_OK) == 0) {
		// no waiters - mark as uncontended and release
		int32 oldValue = user_atomic_get(sem, isWired);
		while (true) {
			int32 inc = oldValue < 0 ? 2 : 1;
			int32 value = user_atomic_test_and_set(sem, oldValue + inc, oldValue, isWired);
			if (value == oldValue)
				return;
			oldValue = value;
		}
	}

	if (entry->condition.EntriesCount() == 0) {
		// mark the semaphore uncontended
		user_atomic_test_and_set(sem, 0, -1, isWired);
	}
}


// #pragma mark - syscalls


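// RAII helper resolving a userland mutex pointer to the context and key used
// by the hash tables: the team-local context and the virtual address for
// ordinary mutexes, or the shared context and the physical address for
// B_USER_MUTEX_SHARED ones. For the latter the page is wired here and
// unwired again on destruction, which is also what makes the direct
// ("isWired") user atomics safe.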
struct UserMutexContextFetcher {
	UserMutexContextFetcher(int32* mutex, uint32 flags)
		:
		fInitStatus(B_OK),
		fShared((flags & B_USER_MUTEX_SHARED) != 0),
		fAddress(0)
	{
		if (!fShared) {
			fContext = get_team_user_mutex_context();
			if (fContext == NULL) {
				fInitStatus = B_NO_MEMORY;
				return;
			}

			fAddress = (addr_t)mutex;
		} else {
			fContext = &sSharedUserMutexContext;

			// wire the page and get the physical address
			fInitStatus = vm_wire_page(B_CURRENT_TEAM, (addr_t)mutex, true,
				&fWiringInfo);
			if (fInitStatus != B_OK)
				return;
			fAddress = fWiringInfo.physicalAddress;
		}
	}

	~UserMutexContextFetcher()
	{
		if (fInitStatus != B_OK)
			return;

		if (fShared)
			vm_unwire_page(&fWiringInfo);
	}

	status_t InitCheck() const
		{ return fInitStatus; }

	struct user_mutex_context* Context() const
		{ return fContext; }

	generic_addr_t Address() const
		{ return fAddress; }

	bool IsWired() const
		{ return fShared; }

private:
	status_t fInitStatus;
	bool fShared;
	struct user_mutex_context* fContext;
	VMPageWiringInfo fWiringInfo;
	generic_addr_t fAddress;
};


static status_t
user_mutex_lock(int32* mutex, const char* name, uint32 flags, bigtime_t timeout)
{
	UserMutexContextFetcher contextFetcher(mutex, flags);
	if (contextFetcher.InitCheck() != B_OK)
		return contextFetcher.InitCheck();

	// get the lock
	UserMutexEntry* entry = get_user_mutex_entry(contextFetcher.Context(),
		contextFetcher.Address());
	if (entry == NULL)
		return B_NO_MEMORY;
	status_t error = B_OK;
	{
		ReadLocker entryLocker(entry->lock);
		error = user_mutex_lock_locked(entry, mutex,
			flags, timeout, entryLocker, contextFetcher.IsWired());
	}
	put_user_mutex_entry(contextFetcher.Context(), entry);

	return error;
}


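// Atomically unlocks fromMutex and waits on toMutex, the primitive underlying
// condition-variable style "unlock and wait" operations. The waiter is
// registered on toMutex's entry before fromMutex is released, so a wakeup
// arriving in between cannot be lost.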
static status_t
user_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
	int32* toMutex, const char* name, uint32 toFlags, bigtime_t timeout)
{
	UserMutexContextFetcher fromFetcher(fromMutex, fromFlags);
	if (fromFetcher.InitCheck() != B_OK)
		return fromFetcher.InitCheck();

	UserMutexContextFetcher toFetcher(toMutex, toFlags);
	if (toFetcher.InitCheck() != B_OK)
		return toFetcher.InitCheck();

	// unlock the first mutex and lock the second one
	UserMutexEntry* fromEntry = NULL,
		*toEntry = get_user_mutex_entry(toFetcher.Context(), toFetcher.Address());
	if (toEntry == NULL)
		return B_NO_MEMORY;
	status_t error = B_OK;
	{
		ConditionVariableEntry waiter;

		bool alreadyLocked = false;
		{
			ReadLocker entryLocker(toEntry->lock);
			alreadyLocked = user_mutex_prepare_to_lock(toEntry, toMutex,
				toFetcher.IsWired());
			if (!alreadyLocked)
				toEntry->condition.Add(&waiter);
		}

		const int32 oldValue = user_atomic_and(fromMutex, ~(int32)B_USER_MUTEX_LOCKED,
			fromFetcher.IsWired());
		if ((oldValue & B_USER_MUTEX_WAITING) != 0) {
			fromEntry = get_user_mutex_entry(fromFetcher.Context(),
				fromFetcher.Address(), true);
			if (fromEntry != NULL) {
				user_mutex_unblock(fromEntry, fromMutex, fromFlags,
					fromFetcher.IsWired());
			}
		}

		if (!alreadyLocked)
			error = waiter.Wait(toFlags, timeout);
	}
	put_user_mutex_entry(fromFetcher.Context(), fromEntry);
	put_user_mutex_entry(toFetcher.Context(), toEntry);

	return error;
}


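/* A rough sketch of how userland might employ these syscalls (hypothetical;
 * the real libroot implementation differs in details such as signal handling
 * and retry loops): the uncontended paths stay entirely in userspace.
 *
 *	void lock(int32* mutex)
 *	{
 *		// fast path: try to take an unlocked, waiter-free mutex
 *		if (atomic_test_and_set(mutex, B_USER_MUTEX_LOCKED, 0) != 0)
 *			_user_mutex_lock(mutex, "lock", 0, 0);	// contended: wait in kernel
 *	}
 *
 *	void unlock(int32* mutex)
 *	{
 *		int32 oldValue = atomic_and(mutex, ~(int32)B_USER_MUTEX_LOCKED);
 *		if ((oldValue & B_USER_MUTEX_WAITING) != 0)
 *			_user_mutex_unblock(mutex, 0);	// hand the lock to one waiter
 *	}
 */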
status_t
_user_mutex_lock(int32* mutex, const char* name, uint32 flags,
	bigtime_t timeout)
{
	if (mutex == NULL || !IS_USER_ADDRESS(mutex) || (addr_t)mutex % 4 != 0)
		return B_BAD_ADDRESS;

	syscall_restart_handle_timeout_pre(flags, timeout);

	status_t error = user_mutex_lock(mutex, name, flags | B_CAN_INTERRUPT,
		timeout);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_mutex_unblock(int32* mutex, uint32 flags)
{
	if (mutex == NULL || !IS_USER_ADDRESS(mutex) || (addr_t)mutex % 4 != 0)
		return B_BAD_ADDRESS;

	UserMutexContextFetcher contextFetcher(mutex, flags);
	if (contextFetcher.InitCheck() != B_OK)
		return contextFetcher.InitCheck();
	struct user_mutex_context* context = contextFetcher.Context();

	// In the case where there is no entry, we must hold the read lock until we
	// unset WAITING, because otherwise some other thread could initiate a wait.
	ReadLocker tableReadLocker(context->lock);
	UserMutexEntry* entry = get_user_mutex_entry(context,
		contextFetcher.Address(), true, true);
	if (entry == NULL) {
		user_atomic_and(mutex, ~(int32)B_USER_MUTEX_WAITING, contextFetcher.IsWired());
		tableReadLocker.Unlock();
	} else {
		tableReadLocker.Unlock();
		user_mutex_unblock(entry, mutex, flags, contextFetcher.IsWired());
	}
	put_user_mutex_entry(context, entry);

	return B_OK;
}


status_t
_user_mutex_switch_lock(int32* fromMutex, uint32 fromFlags,
	int32* toMutex, const char* name, uint32 toFlags, bigtime_t timeout)
{
	if (fromMutex == NULL || !IS_USER_ADDRESS(fromMutex)
			|| (addr_t)fromMutex % 4 != 0 || toMutex == NULL
			|| !IS_USER_ADDRESS(toMutex) || (addr_t)toMutex % 4 != 0) {
		return B_BAD_ADDRESS;
	}

	return user_mutex_switch_lock(fromMutex, fromFlags, toMutex, name,
		toFlags | B_CAN_INTERRUPT, timeout);
}


status_t
_user_mutex_sem_acquire(int32* sem, const char* name, uint32 flags,
	bigtime_t timeout)
{
	if (sem == NULL || !IS_USER_ADDRESS(sem) || (addr_t)sem % 4 != 0)
		return B_BAD_ADDRESS;

	syscall_restart_handle_timeout_pre(flags, timeout);

	UserMutexContextFetcher contextFetcher(sem, flags);
	if (contextFetcher.InitCheck() != B_OK)
		return contextFetcher.InitCheck();
	struct user_mutex_context* context = contextFetcher.Context();

	UserMutexEntry* entry = get_user_mutex_entry(context, contextFetcher.Address());
	if (entry == NULL)
		return B_NO_MEMORY;
	status_t error;
	{
		ReadLocker entryLocker(entry->lock);
		error = user_mutex_sem_acquire_locked(entry, sem,
			flags | B_CAN_INTERRUPT, timeout, entryLocker, contextFetcher.IsWired());
	}
	put_user_mutex_entry(context, entry);

	return syscall_restart_handle_timeout_post(error, timeout);
}


status_t
_user_mutex_sem_release(int32* sem, uint32 flags)
{
	if (sem == NULL || !IS_USER_ADDRESS(sem) || (addr_t)sem % 4 != 0)
		return B_BAD_ADDRESS;

	UserMutexContextFetcher contextFetcher(sem, flags);
	if (contextFetcher.InitCheck() != B_OK)
		return contextFetcher.InitCheck();
	struct user_mutex_context* context = contextFetcher.Context();

	UserMutexEntry* entry = get_user_mutex_entry(context,
		contextFetcher.Address());
	if (entry == NULL)
		return B_NO_MEMORY;
	user_mutex_sem_release(entry, sem, contextFetcher.IsWired());
	put_user_mutex_entry(context, entry);

	return B_OK;
}
703