/*
 * Copyright 2006-2007, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include <Debug.h>

#include "accelerant.h"
#include "accelerant_protos.h"
#include "commands.h"


#undef TRACE
//#define TRACE_ENGINE
#ifdef TRACE_ENGINE
#	define TRACE(x...) _sPrintf("intel_extreme: " x)
#else
#	define TRACE(x...)
#endif

#define ERROR(x...) _sPrintf("intel_extreme: " x)
#define CALLED(x...) TRACE("CALLED %s\n", __PRETTY_FUNCTION__)


static engine_token sEngineToken = {1, 0 /*B_2D_ACCELERATION*/, NULL};


QueueCommands::QueueCommands(ring_buffer &ring)
	:
	fRingBuffer(ring)
{
	acquire_lock(&fRingBuffer.lock);
}


QueueCommands::~QueueCommands()
{
	if (fRingBuffer.position & 0x07) {
		// make sure the command is properly aligned
		Write(COMMAND_NOOP);
	}

	// We must make sure memory is written back in case the ring buffer
	// is in write combining mode - releasing the lock does this, as the
	// buffer is flushed on a locked memory operation (which is what this
	// benaphore does), but it must happen before writing the new tail...
	int32 flush;
	atomic_add(&flush, 1);

	write32(fRingBuffer.register_base + RING_BUFFER_TAIL, fRingBuffer.position);

	release_lock(&fRingBuffer.lock);
}


void
QueueCommands::Put(struct command &command, size_t size)
{
	uint32 count = size / sizeof(uint32);
	uint32 *data = command.Data();

	MakeSpace(count);

	for (uint32 i = 0; i < count; i++) {
		Write(data[i]);
	}
}


void
QueueCommands::PutFlush()
{
	MakeSpace(2);

	Write(COMMAND_FLUSH);
	Write(COMMAND_NOOP);
}


void
QueueCommands::PutWaitFor(uint32 event)
{
	MakeSpace(2);

	Write(COMMAND_WAIT_FOR_EVENT | event);
	Write(COMMAND_NOOP);
}


void
QueueCommands::PutOverlayFlip(uint32 mode, bool updateCoefficients)
{
	MakeSpace(2);

	Write(COMMAND_OVERLAY_FLIP | mode);

	uint32 registers;
	// G33 does not need a physical address for the overlay registers
	if (intel_uses_physical_overlay(*gInfo->shared_info))
		registers = gInfo->shared_info->physical_overlay_registers;
	else
		registers = gInfo->shared_info->overlay_offset;

	Write(registers
		| (updateCoefficients ? OVERLAY_UPDATE_COEFFICIENTS : 0));
}


void
QueueCommands::MakeSpace(uint32 size)
{
	ASSERT((size & 1) == 0);

	size *= sizeof(uint32);
	bigtime_t start = system_time();

	while (fRingBuffer.space_left < size) {
		// wait until more space is free
		uint32 head = read32(fRingBuffer.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;

		if (head <= fRingBuffer.position)
			head += fRingBuffer.size;

		fRingBuffer.space_left = head - fRingBuffer.position;

		if (fRingBuffer.space_left < size) {
			if (system_time() > start + 1000000LL) {
				ERROR("engine stalled, head %" B_PRIx32 "\n", head);
				break;
			}
			spin(10);
		}
	}

	fRingBuffer.space_left -= size;
}


void
QueueCommands::Write(uint32 data)
{
	uint32 *target = (uint32 *)(fRingBuffer.base + fRingBuffer.position);
	*target = data;

	fRingBuffer.position = (fRingBuffer.position + sizeof(uint32))
		& (fRingBuffer.size - 1);
}


// #pragma mark -


void
uninit_ring_buffer(ring_buffer &ringBuffer)
{
	uninit_lock(&ringBuffer.lock);
	write32(ringBuffer.register_base + RING_BUFFER_CONTROL, 0);
}


void
setup_ring_buffer(ring_buffer &ringBuffer, const char* name)
{
	TRACE("Setup ring buffer %s, offset %lx, size %lx\n", name,
		ringBuffer.offset, ringBuffer.size);

	if (init_lock(&ringBuffer.lock, name) < B_OK) {
		// disable ring buffer
		ringBuffer.size = 0;
		return;
	}

	uint32 ring = ringBuffer.register_base;
	ringBuffer.position = 0;
	ringBuffer.space_left = ringBuffer.size;

	write32(ring + RING_BUFFER_TAIL, 0);
	write32(ring + RING_BUFFER_START, ringBuffer.offset);
	write32(ring + RING_BUFFER_CONTROL,
		((ringBuffer.size - B_PAGE_SIZE) & INTEL_RING_BUFFER_SIZE_MASK)
		| INTEL_RING_BUFFER_ENABLED);
}


// #pragma mark - engine management


/*! Return number of hardware engines */
uint32
intel_accelerant_engine_count(void)
{
	CALLED();
	return 1;
}


status_t
intel_acquire_engine(uint32 capabilities, uint32 maxWait, sync_token* syncToken,
	engine_token** _engineToken)
{
	CALLED();
	*_engineToken = &sEngineToken;

	if (acquire_lock(&gInfo->shared_info->engine_lock) != B_OK)
		return B_ERROR;

	if (syncToken)
		intel_sync_to_token(syncToken);

	return B_OK;
}


status_t
intel_release_engine(engine_token* engineToken, sync_token* syncToken)
{
	CALLED();
	if (syncToken != NULL)
		syncToken->engine_id = engineToken->engine_id;

	release_lock(&gInfo->shared_info->engine_lock);
	return B_OK;
}


void
intel_wait_engine_idle(void)
{
	CALLED();

	{
		QueueCommands queue(gInfo->shared_info->primary_ring_buffer);
		queue.PutFlush();
	}

	// TODO: this should only be a temporary solution!
	// a better way to do this would be to acquire the engine's lock and
	// sync to the latest token

	bigtime_t start = system_time();

	ring_buffer &ring = gInfo->shared_info->primary_ring_buffer;
	uint32 head, tail;
	while (true) {
		head = read32(ring.register_base + RING_BUFFER_HEAD)
			& INTEL_RING_BUFFER_HEAD_MASK;
		tail = read32(ring.register_base + RING_BUFFER_TAIL)
			& INTEL_RING_BUFFER_HEAD_MASK;

		if (head == tail)
			break;

		if (system_time() > start + 1000000LL) {
			// the engine seems to be locked up!
			ERROR("engine locked up, head %" B_PRIx32 "!\n", head);
			break;
		}

		spin(10);
	}
}


status_t
intel_get_sync_token(engine_token* engineToken, sync_token* syncToken)
{
	CALLED();
	return B_OK;
}


status_t
intel_sync_to_token(sync_token* syncToken)
{
	CALLED();
	intel_wait_engine_idle();
	return B_OK;
}


// #pragma mark - engine acceleration


void
intel_screen_to_screen_blit(engine_token* token, blit_params* params,
	uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_source_blit_command blit;
		blit.source_left = params[i].src_left;
		blit.source_top = params[i].src_top;
		blit.dest_left = params[i].dest_left;
		blit.dest_top = params[i].dest_top;
		blit.dest_right = params[i].dest_left + params[i].width + 1;
		blit.dest_bottom = params[i].dest_top + params[i].height + 1;

		queue.Put(blit, sizeof(blit));
	}
}


void
intel_fill_rectangle(engine_token* token, uint32 color,
	fill_rect_params* params, uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_color_blit_command blit(false);
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right + 1;
		blit.dest_bottom = params[i].bottom + 1;
		blit.color = color;

		queue.Put(blit, sizeof(blit));
	}
}


void
intel_invert_rectangle(engine_token* token, fill_rect_params* params,
	uint32 count)
{
	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	for (uint32 i = 0; i < count; i++) {
		xy_color_blit_command blit(true);
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right + 1;
		blit.dest_bottom = params[i].bottom + 1;
		blit.color = 0xffffffff;

		queue.Put(blit, sizeof(blit));
	}
}


void
intel_fill_span(engine_token* token, uint32 color, uint16* _params,
	uint32 count)
{
	struct params {
		uint16	top;
		uint16	left;
		uint16	right;
	} *params = (struct params*)_params;

	QueueCommands queue(gInfo->shared_info->primary_ring_buffer);

	xy_setup_mono_pattern_command setup;
	setup.background_color = color;
	setup.pattern = 0;
	queue.Put(setup, sizeof(setup));

	for (uint32 i = 0; i < count; i++) {
		xy_scanline_blit_command blit;
		blit.dest_left = params[i].left;
		blit.dest_top = params[i].top;
		blit.dest_right = params[i].right;
		blit.dest_bottom = params[i].top;

		// queue the scanline blit; without this the span would never
		// be submitted to the ring buffer
		queue.Put(blit, sizeof(blit));
	}
}
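
/*	Usage sketch (illustrative only, not compiled): the hooks above are driven
	through the standard accelerant engine protocol - acquire the engine, queue
	2D commands, fetch a sync token, release the engine, and later sync to the
	token before touching the frame buffer directly. The outline below assumes
	the usual fill_rect_params field order (left, top, right, bottom) and the
	hook signatures defined in this file:

		engine_token* engine;
		sync_token sync;

		if (intel_acquire_engine(B_2D_ACCELERATION, 0, NULL, &engine) == B_OK) {
			fill_rect_params rect = {10, 10, 100, 100};
			intel_fill_rectangle(engine, 0x00ff00, &rect, 1);

			intel_get_sync_token(engine, &sync);
			intel_release_engine(engine, &sync);
		}

		// before direct frame buffer access
		intel_sync_to_token(&sync);
*/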