/***********************************************************************
 * Copyright (c) 2002 Marcus Overhagen. All Rights Reserved.
 * This file may be used under the terms of the OpenBeOS License.
 *
 * Used for BBufferGroup and BBuffer management across teams
 ***********************************************************************/
#include <Buffer.h>
#include "SharedBufferList.h"
#include "debug.h"


status_t
_shared_buffer_list::Init()
{
	CALLED();
	locker_atom = 0;
	locker_sem = create_sem(0, "shared buffer list lock");
	if (locker_sem < B_OK)
		return (status_t) locker_sem;

	for (int i = 0; i < MAX_BUFFER; i++) {
		info[i].id = -1;
		info[i].buffer = 0;
		info[i].reclaim_sem = 0;
		info[i].reclaimed = false;
	}
	return B_OK;
}

_shared_buffer_list *
_shared_buffer_list::Clone(area_id id)
{
	CALLED();
	// if id == -1, we are in the media_server team,
	// and create the initial list, else we clone it

	_shared_buffer_list *adr;
	status_t status;

	if (id == -1) {
		size_t size = (sizeof(_shared_buffer_list) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1);
		status = create_area("shared buffer list", (void **)&adr, B_ANY_KERNEL_ADDRESS, size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (status >= B_OK) {
			status = adr->Init();
			if (status != B_OK)
				delete_area(area_for(adr));
		}
	} else {
		status = clone_area("shared buffer list clone", (void **)&adr, B_ANY_KERNEL_ADDRESS, B_READ_AREA | B_WRITE_AREA, id);
		//TRACE("cloned area, id = 0x%08lx, ptr = 0x%08x\n", status, (int)adr);
	}

	return (status < B_OK) ? NULL : adr;
}

void
_shared_buffer_list::Unmap()
{
	// unmap the memory used by this struct
	// XXX is this safe?
	area_id id;
	id = area_for(this);
	if (id >= B_OK)
		delete_area(id);
}

void
_shared_buffer_list::Terminate(sem_id group_reclaim_sem)
{
	CALLED();

	// delete all BBuffers of this group, then unmap from memory

	if (Lock() != B_OK) { // better not try to access the list unlocked
		// but at least try to unmap the memory
		Unmap();
		return;
	}

	for (int32 i = 0; i < buffercount; i++) {
		if (info[i].reclaim_sem == group_reclaim_sem) {
			// delete the associated buffer
			delete info[i].buffer;
			// decrement the buffer count by one
			buffercount--;
			// fill the gap in the list with the last entry
			if (buffercount > 0) {
				info[i] = info[buffercount];
				i--; // make sure we check this entry again
			}
		}
	}

	Unlock();

	Unmap();
}

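// Lock()/Unlock() implement a benaphore-style lock shared across teams:
// locker_atom is an atomic counter that lives in the shared area, and
// locker_sem is only acquired/released when there is contention, so the
// uncontended case needs no kernel call. Lock() only fails if the
// media_server (which owns locker_sem) has quit or crashed.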
status_t
_shared_buffer_list::Lock()
{
	if (atomic_add(&locker_atom, 1) > 0) {
		status_t status;
		while (B_INTERRUPTED == (status = acquire_sem(locker_sem)))
			;
		return status; // will only return != B_OK if the media_server crashed or quit
	}
	return B_OK;
}

status_t
_shared_buffer_list::Unlock()
{
	if (atomic_add(&locker_atom, -1) > 1)
		return release_sem(locker_sem); // will only return != B_OK if the media_server crashed or quit
	return B_OK;
}

status_t
_shared_buffer_list::AddBuffer(sem_id group_reclaim_sem, BBuffer *buffer)
{
	CALLED();

	if (buffer == NULL)
		return B_BAD_VALUE;

	if (Lock() != B_OK)
		return B_ERROR;

	if (buffercount == MAX_BUFFER) {
		Unlock();
		debugger("we are doomed");
		return B_ERROR;
	}

	info[buffercount].id = buffer->ID();
	info[buffercount].buffer = buffer;
	info[buffercount].reclaim_sem = group_reclaim_sem;
	info[buffercount].reclaimed = true;
	buffercount++;

	status_t status1 = release_sem_etc(group_reclaim_sem, 1, B_DO_NOT_RESCHEDULE);
	status_t status2 = Unlock();

	return (status1 == B_OK && status2 == B_OK) ? B_OK : B_ERROR;
}

status_t
_shared_buffer_list::RequestBuffer(sem_id group_reclaim_sem, int32 buffers_in_group, size_t size, media_buffer_id wantID, BBuffer **buffer, bigtime_t timeout)
{
	CALLED();
	// we always search for a buffer from the group indicated by group_reclaim_sem first
	// if "size" != 0, we search for a buffer that is "size" bytes or larger
	// if "wantID" != 0, we search for a buffer with this id
	// if "*buffer" != NULL, we search for a buffer at this address
	// if we found a buffer, we also need to mark it as requested in all other groups,
	// and we also need to acquire the reclaim_sem of those other groups once

	status_t status;
	uint32 acquire_flags;
	int32 count;

	if (timeout <= 0) {
		timeout = 0;
		acquire_flags = B_RELATIVE_TIMEOUT;
	} else if (timeout != B_INFINITE_TIMEOUT) {
		timeout += system_time();
		acquire_flags = B_ABSOLUTE_TIMEOUT;
	} else {
		// timeout is B_INFINITE_TIMEOUT
		acquire_flags = B_RELATIVE_TIMEOUT;
	}

	// with each iteration we request one more buffer, since we need to skip the buffers that don't fit the request
	count = 1;

	do {
		while (B_INTERRUPTED == (status = acquire_sem_etc(group_reclaim_sem, count, acquire_flags, timeout)))
			;
		if (status != B_OK)
			return status;

		// try to exit safely if the lock fails
		if (Lock() != B_OK) {
			release_sem_etc(group_reclaim_sem, count, 0);
			return B_ERROR;
		}

		for (int32 i = 0; i < buffercount; i++) {
			// we need a BBuffer from the group, and it must be marked as reclaimed
			if (info[i].reclaim_sem == group_reclaim_sem && info[i].reclaimed) {
				if ((size != 0 && size <= info[i].buffer->SizeAvailable())
					|| (*buffer != 0 && info[i].buffer == *buffer)
					|| (wantID != 0 && info[i].id == wantID)) {
					// we found a buffer
					info[i].reclaimed = false;
					*buffer = info[i].buffer;
					// if we requested more than one buffer, release the rest
					if (count > 1)
						release_sem_etc(group_reclaim_sem, count - 1, B_DO_NOT_RESCHEDULE);

					// and mark all buffers with the same ID as requested in all other buffer groups
					RequestBufferInOtherGroups(group_reclaim_sem, info[i].buffer->ID());

					Unlock();
					return B_OK;
				}
			}
		}

		release_sem_etc(group_reclaim_sem, count, B_DO_NOT_RESCHEDULE);
		if (Unlock() != B_OK)
			return B_ERROR;

		// prepare to request one more buffer next time
		count++;
	} while (count <= buffers_in_group);

	return B_ERROR;
}

void
_shared_buffer_list::RequestBufferInOtherGroups(sem_id group_reclaim_sem, media_buffer_id id)
{
	for (int32 i = 0; i < buffercount; i++) {
		// find buffers with the same id, but belonging to other groups
		if (info[i].id == id && info[i].reclaim_sem != group_reclaim_sem) {

			// and mark them as requested
			// XXX this can deadlock if BBuffers with the same media_buffer_id
			// XXX exist in more than one BBufferGroup, and RequestBuffer()
			// XXX is called on both groups (which should not be done).
			status_t status;
			while (B_INTERRUPTED == (status = acquire_sem(info[i].reclaim_sem)))
				;
			// try to skip entries that belong to crashed teams
			if (status != B_OK)
				continue;

			if (info[i].reclaimed == false) {
				TRACE("Error, BBuffer 0x%08x, id = 0x%08x not reclaimed while requesting\n", (int)info[i].buffer, (int)id);
				continue;
			}

			info[i].reclaimed = false;
		}
	}
}

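// Buffer lifecycle: AddBuffer() registers a BBuffer as reclaimed (available)
// and releases the group's reclaim_sem once per buffer. RequestBuffer()
// acquires the reclaim_sem and clears the reclaimed flag while the buffer is
// in use. RecycleBuffer() below reverses this for every group the buffer
// belongs to, since the same media_buffer_id may be shared by several
// BBufferGroups.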
status_t
_shared_buffer_list::RecycleBuffer(BBuffer *buffer)
{
	CALLED();

	int reclaimed_count;

	media_buffer_id id = buffer->ID();

	if (Lock() != B_OK)
		return B_ERROR;

	reclaimed_count = 0;
	for (int32 i = 0; i < buffercount; i++) {
		// find the buffer id, and reclaim it in all groups it belongs to
		if (info[i].id == id) {
			reclaimed_count++;
			if (info[i].reclaimed) {
				TRACE("Error, BBuffer 0x%08x, id = 0x%08x already reclaimed\n", (int)buffer, (int)id);
				continue;
			}
			info[i].reclaimed = true;
			release_sem_etc(info[i].reclaim_sem, 1, B_DO_NOT_RESCHEDULE);
		}
	}
	if (Unlock() != B_OK)
		return B_ERROR;

	if (reclaimed_count == 0) {
		TRACE("Error, BBuffer 0x%08x, id = 0x%08x NOT reclaimed\n", (int)buffer, (int)id);
		return B_ERROR;
	}

	return B_OK;
}

status_t
_shared_buffer_list::GetBufferList(sem_id group_reclaim_sem, int32 buf_count, BBuffer **out_buffers)
{
	CALLED();

	int32 found;

	found = 0;

	if (Lock() != B_OK)
		return B_ERROR;

	for (int32 i = 0; i < buffercount; i++)
		if (info[i].reclaim_sem == group_reclaim_sem) {
			out_buffers[found++] = info[i].buffer;
			if (found == buf_count)
				break;
		}

	if (Unlock() != B_OK)
		return B_ERROR;

	return (found == buf_count) ? B_OK : B_ERROR;
}