/*
 * Copyright 2016, Dmytro Shynkevych, dm.shynk@gmail.com
 * Copyright 2023, Haiku, Inc. All rights reserved.
 * Distributed under the terms of the MIT license.
 */


#include <pthread.h>
#include "pthread_private.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <syscall_utils.h>
#include <syscalls.h>
#include <user_mutex_defs.h>


#define BARRIER_FLAG_SHARED	0x80000000


static const pthread_barrierattr pthread_barrierattr_default = {
	/* .process_shared = */ false
};


int
pthread_barrier_init(pthread_barrier_t* barrier,
	const pthread_barrierattr_t* _attr, unsigned count)
{
	const pthread_barrierattr* attr = _attr != NULL
		? *_attr : &pthread_barrierattr_default;

	if (barrier == NULL || attr == NULL || count < 1)
		return B_BAD_VALUE;

	barrier->flags = attr->process_shared ? BARRIER_FLAG_SHARED : 0;
	barrier->lock = B_USER_MUTEX_LOCKED;
	barrier->mutex = 0;
	barrier->waiter_count = 0;
	barrier->waiter_max = count;

	return B_OK;
}


static status_t
barrier_lock(__haiku_std_int32* mutex, uint32 flags)
{
	// Try to acquire the mutex in userland first; only call into the kernel
	// if it is already held, retrying if the wait is interrupted.
	const int32 oldValue = atomic_test_and_set((int32*)mutex,
		B_USER_MUTEX_LOCKED, 0);
	if (oldValue != 0) {
		status_t error;
		do {
			error = _kern_mutex_lock((int32*)mutex, NULL, flags, 0);
		} while (error == B_INTERRUPTED);

		if (error != B_OK)
			return error;
	}
	return B_OK;
}


static void
barrier_unlock(__haiku_std_int32* mutex, uint32 flags)
{
	// Clear the locked bit; only call into the kernel if someone is waiting.
	int32 oldValue = atomic_and((int32*)mutex,
		~(int32)B_USER_MUTEX_LOCKED);
	if ((oldValue & B_USER_MUTEX_WAITING) != 0)
		_kern_mutex_unblock((int32*)mutex, flags);
}


static void
barrier_ensure_idle(pthread_barrier_t* barrier)
{
	const uint32 flags = (barrier->flags & BARRIER_FLAG_SHARED)
		? B_USER_MUTEX_SHARED : 0;

	// waiter_count < 0 means other threads are still exiting.
	// Loop (usually only one iteration is needed) until this is no longer
	// the case.
	while (atomic_get((int32*)&barrier->waiter_count) < 0) {
		status_t status = barrier_lock(&barrier->mutex, flags);
		if (status != B_OK)
			return;

		barrier_unlock(&barrier->mutex, flags);
	}
}


int
pthread_barrier_wait(pthread_barrier_t* barrier)
{
	if (barrier == NULL)
		return B_BAD_VALUE;

	if (barrier->waiter_max == 1)
		return PTHREAD_BARRIER_SERIAL_THREAD;

	const uint32 mutexFlags = (barrier->flags & BARRIER_FLAG_SHARED)
		? B_USER_MUTEX_SHARED : 0;
	barrier_ensure_idle(barrier);

	if (atomic_add((int32*)&barrier->waiter_count, 1)
			== (barrier->waiter_max - 1)) {
		// We are the last one in. Lock the barrier mutex.
		barrier_lock(&barrier->mutex, mutexFlags);

		// Wake everyone else up.
		barrier->waiter_count = (-barrier->waiter_max) + 1;
		_kern_mutex_unblock((int32*)&barrier->lock,
			mutexFlags | B_USER_MUTEX_UNBLOCK_ALL);

		// Return with the barrier mutex still locked, as waiter_count < 0.
		// The last thread out will take care of unlocking it and resetting
		// state.
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	// We aren't the last one in. Wait until we are woken up.
	do {
		_kern_mutex_lock((int32*)&barrier->lock, "barrier wait", mutexFlags, 0);
	} while (barrier->waiter_count > 0);

	// Release the barrier, so that any later threads trying to acquire it
	// wake up.
	barrier_unlock(&barrier->lock, mutexFlags);

	if (atomic_add((int32*)&barrier->waiter_count, 1) == -1) {
		// We are the last one out. Reset state and unlock.
		barrier->lock = B_USER_MUTEX_LOCKED;
		barrier_unlock(&barrier->mutex, mutexFlags);
	}

	return 0;
}


int
pthread_barrier_destroy(pthread_barrier_t* barrier)
{
	// Wait for any threads still on their way out of a previous wait round.
	barrier_ensure_idle(barrier);
	return B_OK;
}


int
pthread_barrierattr_init(pthread_barrierattr_t* _attr)
{
	pthread_barrierattr* attr = (pthread_barrierattr*)malloc(
		sizeof(pthread_barrierattr));

	if (attr == NULL)
		return B_NO_MEMORY;

	*attr = pthread_barrierattr_default;
	*_attr = attr;

	return B_OK;
}


int
pthread_barrierattr_destroy(pthread_barrierattr_t* _attr)
{
	pthread_barrierattr* attr = _attr != NULL ? *_attr : NULL;

	if (attr == NULL)
		return B_BAD_VALUE;

	free(attr);

	return B_OK;
}


int
pthread_barrierattr_getpshared(const pthread_barrierattr_t* _attr, int* shared)
{
	pthread_barrierattr* attr;

	if (_attr == NULL || (attr = *_attr) == NULL || shared == NULL)
		return B_BAD_VALUE;

	*shared = attr->process_shared
		? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;

	return B_OK;
}


int
pthread_barrierattr_setpshared(pthread_barrierattr_t* _attr, int shared)
{
	pthread_barrierattr* attr;

	if (_attr == NULL || (attr = *_attr) == NULL
		|| shared < PTHREAD_PROCESS_PRIVATE
		|| shared > PTHREAD_PROCESS_SHARED) {
		return B_BAD_VALUE;
	}

	attr->process_shared = shared == PTHREAD_PROCESS_SHARED;

	return 0;
}
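

/*
 * Usage sketch (illustrative only, not part of the implementation above):
 * how a caller might rendezvous N threads on a process-private barrier.
 * The worker() function, sBarrier, and N are hypothetical names for this
 * example; the calls themselves are standard POSIX barrier API.
 *
 *	static pthread_barrier_t sBarrier;
 *
 *	static void*
 *	worker(void* arg)
 *	{
 *		// ... per-thread setup work ...
 *		int result = pthread_barrier_wait(&sBarrier);
 *		if (result == PTHREAD_BARRIER_SERIAL_THREAD) {
 *			// Exactly one thread per round gets this return value and can
 *			// perform one-time post-barrier work.
 *		}
 *		return NULL;
 *	}
 *
 *	// In the spawning thread: initialize for N waiters, create the workers,
 *	// join them, then destroy the barrier once no thread can still be
 *	// inside pthread_barrier_wait().
 *	pthread_barrier_init(&sBarrier, NULL, N);
 *	// ... pthread_create() N workers, pthread_join() them ...
 *	pthread_barrier_destroy(&sBarrier);
 */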