/***********************************************************************
 * Copyright (c) 2002 Marcus Overhagen. All Rights Reserved.
 * This file may be used under the terms of the OpenBeOS License.
 *
 * Used for BBufferGroup and BBuffer management across teams
 ***********************************************************************/
#include <Buffer.h>
#include <string.h>
#include "SharedBufferList.h"
#include "debug.h"


status_t
_shared_buffer_list::Init()
{
	CALLED();
	locker_atom = 0;
	locker_sem = create_sem(0, "shared buffer list lock");
	if (locker_sem < B_OK)
		return (status_t) locker_sem;

	for (int i = 0; i < MAX_BUFFER; i++) {
		info[i].id = -1;
		info[i].buffer = 0;
		info[i].reclaim_sem = 0;
		info[i].reclaimed = false;
	}
	return B_OK;
}

_shared_buffer_list *
_shared_buffer_list::Clone(area_id id)
{
	CALLED();
	// if id == -1, we are in the media_server team,
	// and create the initial list, else we clone it

	_shared_buffer_list *adr;
	status_t status;

	if (id == -1) {
		size_t size = ((sizeof(_shared_buffer_list)) + (B_PAGE_SIZE - 1)) & ~(B_PAGE_SIZE - 1);
		status = create_area("shared buffer list", (void **)&adr, B_ANY_ADDRESS, size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
		if (status >= B_OK) {
			status = adr->Init();
			if (status != B_OK)
				delete_area(area_for(adr));
		}
	} else {
		status = clone_area("shared buffer list clone", (void **)&adr, B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, id);
		if (status < B_OK) {
			ERROR("_shared_buffer_list::Clone() clone area: %ld err = %s\n", id, strerror(status));
		}
	}

	return (status < B_OK) ? NULL : adr;
}
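
// Illustrative sketch (not part of the original source): the media_server side
// creates the master list with Clone(-1), the resulting area id is made
// available to client teams (the transport mechanism is not shown here), and
// each client then maps its own view of the same memory:
//
//	_shared_buffer_list *list = _shared_buffer_list::Clone(-1);	// media_server only
//	area_id id = area_for(list);					// published to client teams
//	_shared_buffer_list *clone = _shared_buffer_list::Clone(id);	// in a client team
//	...
//	clone->Unmap();							// client drops its mapping when done
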
void
_shared_buffer_list::Unmap()
{
	CALLED();
	// unmap the memory used by this struct
	// XXX is this safe?
	area_id id;
	id = area_for(this);
	if (id >= B_OK)
		delete_area(id);
}

void
_shared_buffer_list::Terminate(sem_id group_reclaim_sem)
{
	CALLED();

	// delete all BBuffers of this group, then unmap from memory

	if (Lock() != B_OK) { // better not try to access the list unlocked
		// but at least try to unmap the memory
		Unmap();
		return;
	}

	for (int32 i = 0; i < buffercount; i++) {
		if (info[i].reclaim_sem == group_reclaim_sem) {
			// delete the associated buffer
			delete info[i].buffer;
			// decrement buffer count by one
			buffercount--;
			// fill the gap in the list with the last entry
			if (buffercount > 0) {
				info[i] = info[buffercount];
				i--; // make sure we check this entry again
			}
		}
	}

	Unlock();

	Unmap();
}

status_t
_shared_buffer_list::Lock()
{
	if (atomic_add(&locker_atom, 1) > 0) {
		status_t status;
		while (B_INTERRUPTED == (status = acquire_sem(locker_sem)))
			;
		return status; // will only return != B_OK if the media_server crashed or quit
	}
	return B_OK;
}

status_t
_shared_buffer_list::Unlock()
{
	if (atomic_add(&locker_atom, -1) > 1)
		return release_sem(locker_sem); // will only return != B_OK if the media_server crashed or quit
	return B_OK;
}

status_t
_shared_buffer_list::AddBuffer(sem_id group_reclaim_sem, BBuffer *buffer)
{
	CALLED();

	if (buffer == NULL)
		return B_BAD_VALUE;

	if (Lock() != B_OK)
		return B_ERROR;

	if (buffercount == MAX_BUFFER) {
		Unlock();
		debugger("we are doomed");
		return B_ERROR;
	}

	info[buffercount].id = buffer->ID();
	info[buffercount].buffer = buffer;
	info[buffercount].reclaim_sem = group_reclaim_sem;
	info[buffercount].reclaimed = true;
	buffercount++;

	status_t status1 = release_sem_etc(group_reclaim_sem, 1, B_DO_NOT_RESCHEDULE);
	status_t status2 = Unlock();

	return (status1 == B_OK && status2 == B_OK) ? B_OK : B_ERROR;
}
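
// Note (explanatory, not in the original source): a group's reclaim_sem count
// tracks how many of its buffers are currently reclaimed. AddBuffer() above
// releases the sem once per added buffer, RequestBuffer() below acquires it
// before handing a buffer out, and RecycleBuffer() releases it again when the
// buffer is returned. E.g. a freshly filled group with 3 buffers has a sem
// count of 3; after one successful RequestBuffer() the count is 2.
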
status_t
_shared_buffer_list::RequestBuffer(sem_id group_reclaim_sem, int32 buffers_in_group, size_t size, media_buffer_id wantID, BBuffer **buffer, bigtime_t timeout)
{
	CALLED();
	// we always search for a buffer from the group indicated by group_reclaim_sem first
	// if "size" != 0, we search for a buffer that is "size" bytes or larger
	// if "wantID" != 0, we search for a buffer with this id
	// if "*buffer" != NULL, we search for a buffer at this address
	// if we found a buffer, we also need to mark it in all other groups as requested
	// and also once need to acquire the reclaim_sem of the other groups

	status_t status;
	uint32 acquire_flags;
	int32 count;

	if (timeout <= 0) {
		timeout = 0;
		acquire_flags = B_RELATIVE_TIMEOUT;
	} else if (timeout != B_INFINITE_TIMEOUT) {
		timeout += system_time();
		acquire_flags = B_ABSOLUTE_TIMEOUT;
	} else {
		// timeout is B_INFINITE_TIMEOUT
		acquire_flags = B_RELATIVE_TIMEOUT;
	}

	// with each iteration we request one more buffer, since we need to skip the buffers that don't fit the request
	count = 1;

	do {
		while (B_INTERRUPTED == (status = acquire_sem_etc(group_reclaim_sem, count, acquire_flags, timeout)))
			;
		if (status != B_OK)
			return status;

		// try to exit safely if the lock fails
		if (Lock() != B_OK) {
			ERROR("_shared_buffer_list::RequestBuffer: Lock failed\n");
			release_sem_etc(group_reclaim_sem, count, 0);
			return B_ERROR;
		}

		for (int32 i = 0; i < buffercount; i++) {
			// we need a BBuffer from the group, and it must be marked as reclaimed
			if (info[i].reclaim_sem == group_reclaim_sem && info[i].reclaimed) {
				if ((size != 0 && size <= info[i].buffer->SizeAvailable())
					|| (*buffer != 0 && info[i].buffer == *buffer)
					|| (wantID != 0 && info[i].id == wantID)) {
					// we found a buffer
					info[i].reclaimed = false;
					*buffer = info[i].buffer;
					// if we requested more than one buffer, release the rest
					if (count > 1)
						release_sem_etc(group_reclaim_sem, count - 1, B_DO_NOT_RESCHEDULE);

					// and mark all buffers with the same ID as requested in all other buffer groups
					RequestBufferInOtherGroups(group_reclaim_sem, info[i].buffer->ID());

					Unlock();
					return B_OK;
				}
			}
		}

		release_sem_etc(group_reclaim_sem, count, B_DO_NOT_RESCHEDULE);
		if (Unlock() != B_OK) {
			ERROR("_shared_buffer_list::RequestBuffer: unlock failed\n");
			return B_ERROR;
		}
		// prepare to request one more buffer next time
		count++;
	} while (count <= buffers_in_group);

	ERROR("_shared_buffer_list::RequestBuffer: no buffer found\n");
	return B_ERROR;
}

void
_shared_buffer_list::RequestBufferInOtherGroups(sem_id group_reclaim_sem, media_buffer_id id)
{
	for (int32 i = 0; i < buffercount; i++) {
		// find buffers with same id, but belonging to other groups
		if (info[i].id == id && info[i].reclaim_sem != group_reclaim_sem) {

			// and mark them as requested
			// XXX this can deadlock if BBuffers with same media_buffer_id
			// XXX exist in more than one BBufferGroup, and RequestBuffer()
			// XXX is called on both groups (which should not be done).
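			// explanatory note (not in the original source): the other group's
			// reclaim_sem is acquired once so this buffer also counts as
			// outstanding (not reclaimable) there; RecycleBuffer() releases
			// the sem again for every group the buffer id belongs to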
			status_t status;
			while (B_INTERRUPTED == (status = acquire_sem(info[i].reclaim_sem)))
				;
			// try to skip entries that belong to crashed teams
			if (status != B_OK)
				continue;

			if (info[i].reclaimed == false) {
				ERROR("_shared_buffer_list::RequestBufferInOtherGroups BBuffer %p, id = %ld not reclaimed while requesting\n", info[i].buffer, id);
				continue;
			}

			info[i].reclaimed = false;
		}
	}
}

status_t
_shared_buffer_list::RecycleBuffer(BBuffer *buffer)
{
	CALLED();

	int reclaimed_count;

	// media_buffer_id id = buffer->ID();
	media_buffer_id id = buffer->fBufferID;

	if (Lock() != B_OK)
		return B_ERROR;

	reclaimed_count = 0;
	for (int32 i = 0; i < buffercount; i++) {
		// find the buffer id, and reclaim it in all groups it belongs to
		if (info[i].id == id) {
			reclaimed_count++;
			if (info[i].reclaimed) {
				ERROR("_shared_buffer_list::RecycleBuffer, BBuffer %p, id = %ld already reclaimed\n", buffer, id);
				DEBUG_ONLY(debugger("buffer already reclaimed"));
				continue;
			}
			info[i].reclaimed = true;
			release_sem_etc(info[i].reclaim_sem, 1, B_DO_NOT_RESCHEDULE);
		}
	}
	if (Unlock() != B_OK)
		return B_ERROR;

	if (reclaimed_count == 0) {
		ERROR("_shared_buffer_list::RecycleBuffer, BBuffer %p, id = %ld NOT reclaimed\n", buffer, id);
		return B_ERROR;
	}

	return B_OK;
}

status_t
_shared_buffer_list::GetBufferList(sem_id group_reclaim_sem, int32 buf_count, BBuffer **out_buffers)
{
	CALLED();

	int32 found;

	found = 0;

	if (Lock() != B_OK)
		return B_ERROR;

	for (int32 i = 0; i < buffercount; i++)
		if (info[i].reclaim_sem == group_reclaim_sem) {
			out_buffers[found++] = info[i].buffer;
			if (found == buf_count)
				break;
		}

	if (Unlock() != B_OK)
		return B_ERROR;

	return (found == buf_count) ? B_OK : B_ERROR;
}
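
// Illustrative buffer lifecycle (summary note, not part of the original
// source): a BBufferGroup registers its buffers with AddBuffer(), nodes
// obtain one via RequestBuffer() and hand it back with RecycleBuffer(),
// and the group finally deletes its buffers and unmaps the shared list
// with Terminate().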