1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Stanislaw Skowronek 23 */ 24 25 /* Rewritten for the Haiku Operating System Radeon HD driver 26 * Author: 27 * Alexander von Gluck, kallisti5@unixzen.com 28 */ 29 30 31 #include <Debug.h> 32 33 #include "atom.h" 34 #include "atom-names.h" 35 #include "atom-bits.h" 36 37 38 /* AtomBIOS loop detection 39 * Number of seconds of identical jmp operations 40 * before detecting a fault. 41 */ 42 #define ATOM_OP_JMP_TIMEOUT 5 43 44 // *** Tracing 45 #undef TRACE 46 //#define TRACE_ATOM 47 #ifdef TRACE_ATOM 48 # define TRACE(x...) _sPrintf("radeon_hd: " x) 49 #else 50 # define TRACE(x...) ; 51 #endif 52 53 #define ERROR(x...) _sPrintf("radeon_hd: " x) 54 55 #define ATOM_COND_ABOVE 0 56 #define ATOM_COND_ABOVEOREQUAL 1 57 #define ATOM_COND_ALWAYS 2 58 #define ATOM_COND_BELOW 3 59 #define ATOM_COND_BELOWOREQUAL 4 60 #define ATOM_COND_EQUAL 5 61 #define ATOM_COND_NOTEQUAL 6 62 63 #define ATOM_PORT_ATI 0 64 #define ATOM_PORT_PCI 1 65 #define ATOM_PORT_SYSIO 2 66 67 #define ATOM_UNIT_MICROSEC 0 68 #define ATOM_UNIT_MILLISEC 1 69 70 #define PLL_INDEX 2 71 #define PLL_DATA 3 72 73 74 typedef struct { 75 atom_context *ctx; 76 77 uint32 *ps, *ws; 78 int ps_shift; 79 uint16 start; 80 uint16 lastJump; 81 uint32 lastJumpCount; 82 bigtime_t jumpStart; 83 bool abort; 84 } atom_exec_context; 85 86 int atom_debug = 0; 87 status_t atom_execute_table_locked(atom_context *ctx, 88 int index, uint32 *params); 89 status_t atom_execute_table(atom_context *ctx, int index, uint32 *params); 90 91 static uint32 atom_arg_mask[8] = {0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 92 0xFF, 0xFF00, 0xFF0000, 0xFF000000}; 93 static int atom_arg_shift[8] = {0, 0, 8, 16, 0, 8, 16, 24}; 94 static int atom_dst_to_src[8][4] = { 95 // translate destination alignment field to the source alignment encoding 96 { 0, 0, 0, 0 }, 97 { 1, 2, 3, 0 }, 98 { 1, 2, 3, 0 }, 99 { 1, 2, 3, 0 }, 100 { 4, 5, 6, 7 }, 101 { 4, 5, 6, 7 }, 102 { 4, 5, 6, 7 }, 103 { 4, 5, 6, 7 }, 104 }; 105 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; 106 107 static int debug_depth = 0; 108 109 static uint32 110 atom_iio_execute(atom_context *ctx, int base, uint32 index, uint32 data) 111 { 112 uint32 temp = 0xCDCDCDCD; 113 while (1) 114 switch(CU8(base)) { 115 case ATOM_IIO_NOP: 116 base++; 117 break; 118 case ATOM_IIO_READ: 119 temp = ctx->card->ioreg_read(CU16(base + 1)); 120 base += 3; 121 break; 122 case ATOM_IIO_WRITE: 123 (void)ctx->card->reg_read(CU16(base + 1)); 124 ctx->card->ioreg_write(CU16(base + 1), temp); 125 base += 3; 126 break; 127 case ATOM_IIO_CLEAR: 128 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2)); 129 base += 3; 130 break; 131 case ATOM_IIO_SET: 132 temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2); 133 base += 3; 134 break; 135 case ATOM_IIO_MOVE_INDEX: 136 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 137 temp |= ((index >> CU8(base + 2)) 138 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 139 base += 4; 140 break; 141 case ATOM_IIO_MOVE_DATA: 142 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 143 temp |= ((data >> CU8(base + 2)) 144 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 145 base += 4; 146 break; 147 case ATOM_IIO_MOVE_ATTR: 148 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 149 temp |= ((ctx->io_attr >> CU8(base + 2)) 150 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 151 base += 4; 152 break; 153 case ATOM_IIO_END: 154 return temp; 155 default: 156 TRACE("%s: Unknown IIO opcode.\n", __func__); 157 return 0; 158 } 159 } 160 161 162 static uint32 163 atom_get_src_int(atom_exec_context *ctx, uint8 attr, int *ptr, 164 uint32 *saved, int print) 165 { 166 uint32 idx, val = 0xCDCDCDCD, align, arg; 167 atom_context *gctx = ctx->ctx; 168 arg = attr & 7; 169 align = (attr >> 3) & 7; 170 switch(arg) { 171 case ATOM_ARG_REG: 172 idx = U16(*ptr); 173 (*ptr)+=2; 174 idx += gctx->reg_block; 175 switch(gctx->io_mode) { 176 case ATOM_IO_MM: 177 val = gctx->card->reg_read(idx); 178 break; 179 case ATOM_IO_PCI: 180 TRACE("%s: PCI registers are not implemented.\n", __func__); 181 return 0; 182 case ATOM_IO_SYSIO: 183 TRACE("%s: SYSIO registers are not implemented.\n", 184 __func__); 185 return 0; 186 default: 187 if (!(gctx->io_mode & 0x80)) { 188 TRACE("%s: Bad IO mode.\n", __func__); 189 return 0; 190 } 191 if (!gctx->iio[gctx->io_mode & 0x7F]) { 192 TRACE("%s: Undefined indirect IO read method %d.\n", 193 __func__, gctx->io_mode & 0x7F); 194 return 0; 195 } 196 val = atom_iio_execute(gctx, 197 gctx->iio[gctx->io_mode & 0x7F], idx, 0); 198 } 199 break; 200 case ATOM_ARG_PS: 201 idx = U8(*ptr); 202 (*ptr)++; 203 val = B_LENDIAN_TO_HOST_INT32(ctx->ps[idx]); 204 // TODO : val = get_unaligned_le32((u32 *)&ctx->ps[idx]); 205 break; 206 case ATOM_ARG_WS: 207 idx = U8(*ptr); 208 (*ptr)++; 209 switch(idx) { 210 case ATOM_WS_QUOTIENT: 211 val = gctx->divmul[0]; 212 break; 213 case ATOM_WS_REMAINDER: 214 val = gctx->divmul[1]; 215 break; 216 case ATOM_WS_DATAPTR: 217 val = gctx->data_block; 218 break; 219 case ATOM_WS_SHIFT: 220 val = gctx->shift; 221 break; 222 case ATOM_WS_OR_MASK: 223 val = 1 << gctx->shift; 224 break; 225 case ATOM_WS_AND_MASK: 226 val = ~(1 << gctx->shift); 227 break; 228 case ATOM_WS_FB_WINDOW: 229 val = gctx->fb_base; 230 break; 231 case ATOM_WS_ATTRIBUTES: 232 val = gctx->io_attr; 233 break; 234 case ATOM_WS_REGPTR: 235 val = gctx->reg_block; 236 break; 237 default: 238 val = ctx->ws[idx]; 239 } 240 break; 241 case ATOM_ARG_ID: 242 idx = U16(*ptr); 243 (*ptr) += 2; 244 val = U32(idx + gctx->data_block); 245 break; 246 case ATOM_ARG_FB: 247 idx = U8(*ptr); 248 (*ptr)++; 249 val = gctx->scratch[((gctx->fb_base + idx) / 4)]; 250 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 251 ERROR("%s: fb tried to read beyond scratch region!" 252 " %" B_PRIu32 " vs. %" B_PRIu32 "\n", __func__, 253 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 254 val = 0; 255 } else 256 val = gctx->scratch[(gctx->fb_base / 4) + idx]; 257 break; 258 case ATOM_ARG_IMM: 259 switch(align) { 260 case ATOM_SRC_DWORD: 261 val = U32(*ptr); 262 (*ptr)+=4; 263 return val; 264 case ATOM_SRC_WORD0: 265 case ATOM_SRC_WORD8: 266 case ATOM_SRC_WORD16: 267 val = U16(*ptr); 268 (*ptr) += 2; 269 return val; 270 case ATOM_SRC_BYTE0: 271 case ATOM_SRC_BYTE8: 272 case ATOM_SRC_BYTE16: 273 case ATOM_SRC_BYTE24: 274 val = U8(*ptr); 275 (*ptr)++; 276 return val; 277 } 278 return 0; 279 case ATOM_ARG_PLL: 280 idx = U8(*ptr); 281 (*ptr)++; 282 val = gctx->card->pll_read(idx); 283 break; 284 case ATOM_ARG_MC: 285 idx = U8(*ptr); 286 (*ptr)++; 287 val = gctx->card->mc_read(idx); 288 return 0; 289 } 290 if (saved) 291 *saved = val; 292 val &= atom_arg_mask[align]; 293 val >>= atom_arg_shift[align]; 294 return val; 295 } 296 297 298 static void 299 atom_skip_src_int(atom_exec_context *ctx, uint8 attr, int *ptr) 300 { 301 uint32 align = (attr >> 3) & 7, arg = attr & 7; 302 switch(arg) { 303 case ATOM_ARG_REG: 304 case ATOM_ARG_ID: 305 (*ptr) += 2; 306 break; 307 case ATOM_ARG_PLL: 308 case ATOM_ARG_MC: 309 case ATOM_ARG_PS: 310 case ATOM_ARG_WS: 311 case ATOM_ARG_FB: 312 (*ptr)++; 313 break; 314 case ATOM_ARG_IMM: 315 switch(align) { 316 case ATOM_SRC_DWORD: 317 (*ptr) += 4; 318 return; 319 case ATOM_SRC_WORD0: 320 case ATOM_SRC_WORD8: 321 case ATOM_SRC_WORD16: 322 (*ptr) += 2; 323 return; 324 case ATOM_SRC_BYTE0: 325 case ATOM_SRC_BYTE8: 326 case ATOM_SRC_BYTE16: 327 case ATOM_SRC_BYTE24: 328 (*ptr)++; 329 return; 330 } 331 return; 332 } 333 } 334 335 336 static uint32 337 atom_get_src(atom_exec_context *ctx, uint8 attr, int *ptr) 338 { 339 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 340 } 341 342 343 static uint32 344 atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) 345 { 346 uint32 val = 0xCDCDCDCD; 347 348 switch (align) { 349 case ATOM_SRC_DWORD: 350 val = U32(*ptr); 351 (*ptr) += 4; 352 break; 353 case ATOM_SRC_WORD0: 354 case ATOM_SRC_WORD8: 355 case ATOM_SRC_WORD16: 356 val = U16(*ptr); 357 (*ptr) += 2; 358 break; 359 case ATOM_SRC_BYTE0: 360 case ATOM_SRC_BYTE8: 361 case ATOM_SRC_BYTE16: 362 case ATOM_SRC_BYTE24: 363 val = U8(*ptr); 364 (*ptr)++; 365 break; 366 } 367 return val; 368 } 369 370 371 static uint32 372 atom_get_dst(atom_exec_context *ctx, int arg, uint8 attr, 373 int *ptr, uint32 *saved, int print) 374 { 375 return atom_get_src_int(ctx, 376 arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr, saved, print); 377 } 378 379 380 static void 381 atom_skip_dst(atom_exec_context *ctx, int arg, uint8 attr, int *ptr) 382 { 383 atom_skip_src_int(ctx, 384 arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr); 385 } 386 387 388 static void 389 atom_put_dst(atom_exec_context *ctx, int arg, uint8 attr, 390 int *ptr, uint32 val, uint32 saved) 391 { 392 uint32 align = atom_dst_to_src[(attr>>3)&7][(attr>>6)&3], 393 old_val = val, idx; 394 atom_context *gctx = ctx->ctx; 395 old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; 396 val <<= atom_arg_shift[align]; 397 val &= atom_arg_mask[align]; 398 saved &= ~atom_arg_mask[align]; 399 val |= saved; 400 switch(arg) { 401 case ATOM_ARG_REG: 402 idx = U16(*ptr); 403 (*ptr) += 2; 404 idx += gctx->reg_block; 405 switch(gctx->io_mode) { 406 case ATOM_IO_MM: 407 if (idx == 0) 408 gctx->card->reg_write(idx, val << 2); 409 else 410 gctx->card->reg_write(idx, val); 411 break; 412 case ATOM_IO_PCI: 413 TRACE("%s: PCI registers are not implemented.\n", 414 __func__); 415 return; 416 case ATOM_IO_SYSIO: 417 TRACE("%s: SYSIO registers are not implemented.\n", 418 __func__); 419 return; 420 default: 421 if (!(gctx->io_mode & 0x80)) { 422 TRACE("%s: Bad IO mode.\n", __func__); 423 return; 424 } 425 if (!gctx->iio[gctx->io_mode & 0xFF]) { 426 TRACE("%s: Undefined indirect IO write method %d\n", 427 __func__, gctx->io_mode & 0x7F); 428 return; 429 } 430 atom_iio_execute(gctx, gctx->iio[gctx->io_mode&0xFF], 431 idx, val); 432 } 433 break; 434 case ATOM_ARG_PS: 435 idx = U8(*ptr); 436 (*ptr)++; 437 ctx->ps[idx] = B_HOST_TO_LENDIAN_INT32(val); 438 break; 439 case ATOM_ARG_WS: 440 idx = U8(*ptr); 441 (*ptr)++; 442 switch(idx) { 443 case ATOM_WS_QUOTIENT: 444 gctx->divmul[0] = val; 445 break; 446 case ATOM_WS_REMAINDER: 447 gctx->divmul[1] = val; 448 break; 449 case ATOM_WS_DATAPTR: 450 gctx->data_block = val; 451 break; 452 case ATOM_WS_SHIFT: 453 gctx->shift = val; 454 break; 455 case ATOM_WS_OR_MASK: 456 case ATOM_WS_AND_MASK: 457 break; 458 case ATOM_WS_FB_WINDOW: 459 gctx->fb_base = val; 460 break; 461 case ATOM_WS_ATTRIBUTES: 462 gctx->io_attr = val; 463 break; 464 case ATOM_WS_REGPTR: 465 gctx->reg_block = val; 466 break; 467 default: 468 ctx->ws[idx] = val; 469 } 470 break; 471 case ATOM_ARG_FB: 472 idx = U8(*ptr); 473 (*ptr)++; 474 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 475 ERROR("%s: fb tried to write beyond scratch region! " 476 "%" B_PRIu32 " vs. %" B_PRIu32 "\n", __func__, 477 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 478 } else 479 gctx->scratch[(gctx->fb_base / 4) + idx] = val; 480 break; 481 case ATOM_ARG_PLL: 482 idx = U8(*ptr); 483 (*ptr)++; 484 gctx->card->pll_write(idx, val); 485 break; 486 case ATOM_ARG_MC: 487 idx = U8(*ptr); 488 (*ptr)++; 489 gctx->card->mc_write(idx, val); 490 return; 491 } 492 } 493 494 495 static void 496 atom_op_add(atom_exec_context *ctx, int *ptr, int arg) 497 { 498 uint8 attr = U8((*ptr)++); 499 uint32 dst, src, saved = 0; 500 int dptr = *ptr; 501 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 502 src = atom_get_src(ctx, attr, ptr); 503 #ifdef TRACE_ATOM 504 TRACE("%s: 0x%" B_PRIX32 " + 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 505 __func__, dst, src, dst + src); 506 #endif 507 dst += src; 508 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 509 } 510 511 512 static void 513 atom_op_and(atom_exec_context *ctx, int *ptr, int arg) 514 { 515 uint8 attr = U8((*ptr)++); 516 uint32 dst, src, saved = 0; 517 int dptr = *ptr; 518 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 519 src = atom_get_src(ctx, attr, ptr); 520 #ifdef TRACE_ATOM 521 TRACE("%s: 0x%" B_PRIX32 " & 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 522 __func__, src, dst, dst & src); 523 #endif 524 dst &= src; 525 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 526 } 527 528 529 static void 530 atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) 531 { 532 TRACE("%s: Quack!\n", __func__); 533 } 534 535 536 static void 537 atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 538 { 539 int idx = U8((*ptr)++); 540 status_t result = B_OK; 541 542 if (idx < ATOM_TABLE_NAMES_CNT) { 543 TRACE("%s: table: %s (%d)\n", __func__, atom_table_names[idx], idx); 544 } else { 545 ERROR("%s: table: unknown (%d)\n", __func__, idx); 546 } 547 548 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) { 549 result = atom_execute_table_locked(ctx->ctx, 550 idx, ctx->ps + ctx->ps_shift); 551 } 552 553 if (result != B_OK) 554 ctx->abort = true; 555 } 556 557 558 static void 559 atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 560 { 561 uint8 attr = U8((*ptr)++); 562 uint32 saved = 0; 563 int dptr = *ptr; 564 attr &= 0x38; 565 attr |= atom_def_dst[attr>>3]<<6; 566 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 567 TRACE("%s\n", __func__); 568 atom_put_dst(ctx, arg, attr, &dptr, 0, saved); 569 } 570 571 572 static void 573 atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) 574 { 575 uint8 attr = U8((*ptr)++); 576 uint32 dst, src; 577 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 578 src = atom_get_src(ctx, attr, ptr); 579 ctx->ctx->cs_equal = (dst == src); 580 ctx->ctx->cs_above = (dst > src); 581 TRACE("%s: 0x%" B_PRIX32 " %s 0x%" B_PRIX32 "\n", __func__, 582 dst, ctx->ctx->cs_above ? ">" : "<=", src); 583 } 584 585 586 static void 587 atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 588 { 589 bigtime_t count = U8((*ptr)++); 590 if (arg == ATOM_UNIT_MICROSEC) { 591 TRACE("%s: %" B_PRId64 " microseconds\n", __func__, count); 592 // Microseconds 593 snooze(count); 594 } else { 595 TRACE("%s: %" B_PRId64 " milliseconds\n", __func__, count); 596 // Milliseconds 597 snooze(count * 1000); 598 } 599 } 600 601 602 static void 603 atom_op_div(atom_exec_context *ctx, int *ptr, int arg) 604 { 605 uint8 attr = U8((*ptr)++); 606 uint32 dst, src; 607 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 608 src = atom_get_src(ctx, attr, ptr); 609 if (src != 0) { 610 ctx->ctx->divmul[0] = dst / src; 611 ctx->ctx->divmul[1] = dst%src; 612 } else { 613 ctx->ctx->divmul[0] = 0; 614 ctx->ctx->divmul[1] = 0; 615 } 616 #ifdef ATOM_TRACE 617 TRACE("%s: 0x%" B_PRIX32 " / 0x%" B_PRIX32 " is 0x%" B_PRIX32 618 " remander 0x%" B_PRIX32 "\n", __func__, dst, src, 619 ctx->ctx->divmul[0], ctx->ctx->divmul[1]); 620 #endif 621 } 622 623 624 static void 625 atom_op_div32(atom_exec_context *ctx, int *ptr, int arg) 626 { 627 uint64 val64; 628 uint8 attr = U8((*ptr)++); 629 uint32 dst, src; 630 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 631 src = atom_get_src(ctx, attr, ptr); 632 if (src != 0) { 633 val64 = dst; 634 val64 |= ((uint64)ctx->ctx->divmul[1]) << 32; 635 val64 /= (uint64)src; 636 // Lower 32 637 ctx->ctx->divmul[0] = (uint32)val64; 638 // Upper 32 639 ctx->ctx->divmul[1] = (uint32)(((val64) >> 16) >> 16); 640 } else { 641 ctx->ctx->divmul[0] = 0; 642 ctx->ctx->divmul[1] = 0; 643 } 644 #ifdef ATOM_TRACE 645 TRACE("%s: 0x%" B_PRIX32 " / 0x%" B_PRIX32 " is" 646 " 0x%" B_PRIX32 " + 0x%" B_PRIX32 "\n", __func__, dst, src, 647 ctx->ctx->divmul[0], ctx->ctx->divmul[1]); 648 #endif 649 } 650 651 652 static void 653 atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) 654 { 655 /* functionally, a nop */ 656 } 657 658 659 static void 660 atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 661 { 662 int execute = 0, target = U16(*ptr); 663 (*ptr) += 2; 664 switch(arg) { 665 case ATOM_COND_ABOVE: 666 execute = ctx->ctx->cs_above; 667 break; 668 case ATOM_COND_ABOVEOREQUAL: 669 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; 670 break; 671 case ATOM_COND_ALWAYS: 672 execute = 1; 673 break; 674 case ATOM_COND_BELOW: 675 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); 676 break; 677 case ATOM_COND_BELOWOREQUAL: 678 execute = !ctx->ctx->cs_above; 679 break; 680 case ATOM_COND_EQUAL: 681 execute = ctx->ctx->cs_equal; 682 break; 683 case ATOM_COND_NOTEQUAL: 684 execute = !ctx->ctx->cs_equal; 685 break; 686 } 687 TRACE("%s: execute jump: %s; target: 0x%04X\n", __func__, 688 execute? "yes" : "no", target); 689 690 if (execute) { 691 // Time based jmp timeout 692 if (ctx->lastJump == (ctx->start + target)) { 693 bigtime_t loopDuration = system_time() - ctx->jumpStart; 694 if (loopDuration > ATOM_OP_JMP_TIMEOUT * 1000000) { 695 ERROR("%s: Error: AtomBIOS stuck in loop for more then %d " 696 "seconds. (%" B_PRIu32 " identical jmp op's)\n", __func__, 697 ATOM_OP_JMP_TIMEOUT, ctx->lastJumpCount); 698 ctx->abort = true; 699 } else 700 ctx->lastJumpCount++; 701 } else { 702 ctx->jumpStart = system_time(); 703 ctx->lastJump = ctx->start + target; 704 ctx->lastJumpCount = 1; 705 } 706 *ptr = ctx->start + target; 707 } 708 } 709 710 711 static void 712 atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 713 { 714 uint8 attr = U8((*ptr)++); 715 uint32 dst, mask, src, saved = 0; 716 int dptr = *ptr; 717 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 718 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); 719 src = atom_get_src(ctx, attr, ptr); 720 dst &= mask; 721 dst |= src; 722 TRACE("%s: src: 0x%" B_PRIX32 " mask 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 723 __func__, src, mask, dst); 724 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 725 } 726 727 728 static void 729 atom_op_move(atom_exec_context *ctx, int *ptr, int arg) 730 { 731 uint8 attr = U8((*ptr)++); 732 uint32 src, saved; 733 int dptr = *ptr; 734 if (((attr >> 3) & 7) != ATOM_SRC_DWORD) 735 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 736 else { 737 atom_skip_dst(ctx, arg, attr, ptr); 738 saved = 0xCDCDCDCD; 739 } 740 src = atom_get_src(ctx, attr, ptr); 741 TRACE("%s: src: 0x%" B_PRIX32 "; saved: 0x%" B_PRIX32 "\n", 742 __func__, src, saved); 743 atom_put_dst(ctx, arg, attr, &dptr, src, saved); 744 } 745 746 747 static void 748 atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) 749 { 750 uint8 attr = U8((*ptr)++); 751 uint32 dst, src; 752 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 753 src = atom_get_src(ctx, attr, ptr); 754 ctx->ctx->divmul[0] = dst * src; 755 TRACE("%s: 0x%" B_PRIX32 " * 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 756 __func__, dst, src, ctx->ctx->divmul[0]); 757 } 758 759 760 static void 761 atom_op_mul32(atom_exec_context *ctx, int *ptr, int arg) 762 { 763 uint64 val64; 764 uint8 attr = U8((*ptr)++); 765 uint32 dst, src; 766 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 767 src = atom_get_src(ctx, attr, ptr); 768 val64 = (uint64)dst * (uint64)src; 769 // Lower 32 770 ctx->ctx->divmul[0] = (uint32)val64; 771 // Upper 32 772 ctx->ctx->divmul[1] = (uint32)(((val64) >> 16) >> 16); 773 #ifdef ATOM_TRACE 774 TRACE("%s: 0x%" B_PRIX32 " * 0x%" B_PRIX32 " is" 775 " 0x%" B_PRIX32 " + 0x%" B_PRIX32 "\n", __func__, dst, src, 776 ctx->ctx->divmul[0], ctx->ctx->divmul[1]); 777 #endif 778 } 779 780 781 static void 782 atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) 783 { 784 /* nothing */ 785 } 786 787 788 static void 789 atom_op_or(atom_exec_context *ctx, int *ptr, int arg) 790 { 791 uint8 attr = U8((*ptr)++); 792 uint32 dst, src, saved = 0; 793 int dptr = *ptr; 794 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 795 src = atom_get_src(ctx, attr, ptr); 796 #ifdef ATOM_TRACE 797 TRACE("%s: 0x%" B_PRIX32 " | 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 798 __func__, dst, src, dst | src); 799 #endif 800 dst |= src; 801 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 802 } 803 804 805 static void 806 atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) 807 { 808 #ifdef ATOM_TRACE 809 uint8 val = U8((*ptr)++); 810 TRACE("%s: POST card output: 0x%" B_PRIX8 "\n", __func__, val); 811 #endif 812 } 813 814 815 static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) 816 { 817 TRACE("%s: unimplemented!\n", __func__); 818 } 819 820 821 static void 822 atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) 823 { 824 TRACE("%s: unimplemented!\n", __func__); 825 } 826 827 828 static void 829 atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) 830 { 831 TRACE("%s: unimplemented!\n", __func__); 832 } 833 834 835 static void 836 atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) 837 { 838 int idx = U8(*ptr); 839 (*ptr)++; 840 TRACE("%s: block: %d\n", __func__, idx); 841 if (!idx) 842 ctx->ctx->data_block = 0; 843 else if (idx == 255) 844 ctx->ctx->data_block = ctx->start; 845 else 846 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); 847 } 848 849 850 static void 851 atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) 852 { 853 uint8 attr = U8((*ptr)++); 854 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); 855 TRACE("%s: fb_base: 0x%" B_PRIX32 "\n", __func__, ctx->ctx->fb_base); 856 } 857 858 859 static void 860 atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) 861 { 862 int port; 863 switch(arg) { 864 case ATOM_PORT_ATI: 865 port = U16(*ptr); 866 if (port < ATOM_IO_NAMES_CNT) { 867 TRACE("%s: port: %d (%s)\n", __func__, 868 port, atom_io_names[port]); 869 } else 870 TRACE("%s: port: %d\n", __func__, port); 871 if (!port) 872 ctx->ctx->io_mode = ATOM_IO_MM; 873 else 874 ctx->ctx->io_mode = ATOM_IO_IIO | port; 875 (*ptr) += 2; 876 break; 877 case ATOM_PORT_PCI: 878 ctx->ctx->io_mode = ATOM_IO_PCI; 879 (*ptr)++; 880 break; 881 case ATOM_PORT_SYSIO: 882 ctx->ctx->io_mode = ATOM_IO_SYSIO; 883 (*ptr)++; 884 break; 885 } 886 } 887 888 889 static void 890 atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) 891 { 892 ctx->ctx->reg_block = U16(*ptr); 893 (*ptr)+=2; 894 } 895 896 897 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) 898 { 899 uint8 attr = U8((*ptr)++), shift; 900 uint32 saved = 0, dst; 901 int dptr = *ptr; 902 attr &= 0x38; 903 attr |= atom_def_dst[attr >> 3] << 6; 904 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 905 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 906 #ifdef ATOM_TRACE 907 TRACE("%s: 0x%" B_PRIX32 " << %" B_PRId8 " is 0X%" B_PRIX32 "\n", 908 __func__, dst, shift, dst << shift); 909 #endif 910 dst <<= shift; 911 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 912 } 913 914 915 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) 916 { 917 uint8 attr = U8((*ptr)++), shift; 918 uint32 saved = 0, dst; 919 int dptr = *ptr; 920 attr &= 0x38; 921 attr |= atom_def_dst[attr >> 3] << 6; 922 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 923 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 924 #ifdef ATOM_TRACE 925 TRACE("%s: 0x%" B_PRIX32 " >> %" B_PRId8 " is 0X%" B_PRIX32 "\n", 926 __func__, dst, shift, dst << shift); 927 #endif 928 dst >>= shift; 929 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 930 } 931 932 933 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 934 { 935 uint8 attr = U8((*ptr)++), shift; 936 uint32 saved = 0, dst; 937 int dptr = *ptr; 938 uint32 dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 939 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 940 /* op needs to full dst value */ 941 dst = saved; 942 shift = atom_get_src(ctx, attr, ptr); 943 #ifdef ATOM_TRACE 944 TRACE("%s: 0x%" B_PRIX32 " << %" B_PRId8 " is 0X%" B_PRIX32 "\n", 945 __func__, dst, shift, dst << shift); 946 #endif 947 dst <<= shift; 948 dst &= atom_arg_mask[dst_align]; 949 dst >>= atom_arg_shift[dst_align]; 950 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 951 } 952 953 954 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) 955 { 956 uint8 attr = U8((*ptr)++), shift; 957 uint32 saved = 0, dst; 958 int dptr = *ptr; 959 uint32 dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 960 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 961 /* op needs to full dst value */ 962 dst = saved; 963 shift = atom_get_src(ctx, attr, ptr); 964 #ifdef ATOM_TRACE 965 TRACE("%s: 0x%" B_PRIX32 " >> %" B_PRId8 " is 0X%" B_PRIX32 "\n", 966 __func__, dst, shift, dst << shift); 967 #endif 968 dst >>= shift; 969 dst &= atom_arg_mask[dst_align]; 970 dst >>= atom_arg_shift[dst_align]; 971 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 972 } 973 974 975 static void 976 atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) 977 { 978 uint8 attr = U8((*ptr)++); 979 uint32 dst, src, saved = 0; 980 int dptr = *ptr; 981 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 982 src = atom_get_src(ctx, attr, ptr); 983 #ifdef TRACE_ATOM 984 TRACE("%s: 0x%" B_PRIX32 " - 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 985 __func__, dst, src, dst - src); 986 #endif 987 dst -= src; 988 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 989 } 990 991 992 static void 993 atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) 994 { 995 uint8 attr = U8((*ptr)++); 996 uint32 src, val, target; 997 TRACE("%s: switch start\n", __func__); 998 src = atom_get_src(ctx, attr, ptr); 999 while (U16(*ptr) != ATOM_CASE_END) 1000 if (U8(*ptr) == ATOM_CASE_MAGIC) { 1001 (*ptr)++; 1002 TRACE("%s: switch case\n", __func__); 1003 val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr); 1004 target = U16(*ptr); 1005 if (val == src) { 1006 *ptr = ctx->start + target; 1007 return; 1008 } 1009 (*ptr) += 2; 1010 } else { 1011 TRACE("%s: ERROR bad case\n", __func__); 1012 return; 1013 } 1014 (*ptr) += 2; 1015 } 1016 1017 1018 static void 1019 atom_op_test(atom_exec_context *ctx, int *ptr, int arg) 1020 { 1021 uint8 attr = U8((*ptr)++); 1022 uint32 dst, src; 1023 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 1024 src = atom_get_src(ctx, attr, ptr); 1025 // logic is reversed 1026 ctx->ctx->cs_equal = ((dst & src) == 0); 1027 TRACE("%s: 0x%" B_PRIX32 " and 0x%" B_PRIX32 " are %s\n", __func__, 1028 dst, src, ctx->ctx->cs_equal ? "NE" : "EQ"); 1029 } 1030 1031 1032 static void 1033 atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) 1034 { 1035 uint8 attr = U8((*ptr)++); 1036 uint32 dst, src, saved = 0; 1037 int dptr = *ptr; 1038 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 1039 src = atom_get_src(ctx, attr, ptr); 1040 #ifdef ATOM_TRACE 1041 TRACE("%s: 0x%" B_PRIX32 " ^ 0X%" B_PRIX32 " is " B_PRIX32 "\n", 1042 __func__, dst, src, dst ^ src); 1043 #endif 1044 dst ^= src; 1045 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 1046 } 1047 1048 1049 static void 1050 atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) 1051 { 1052 #ifdef ATOM_TRACE 1053 uint8 val = U8((*ptr)++); 1054 TRACE("AtomBIOS DEBUG OP: 0x%02X\n", val); 1055 #endif 1056 } 1057 1058 1059 static void 1060 atom_op_processds(atom_exec_context *ctx, int *ptr, int arg) 1061 { 1062 uint16 val = U16(*ptr); 1063 (*ptr) += val + 2; 1064 TRACE("%s: Processds output: 0x%02X\n", __func__, val); 1065 } 1066 1067 1068 static struct { 1069 void (*func)(atom_exec_context *, int *, int); 1070 int arg; 1071 } opcode_table[ATOM_OP_CNT] = { 1072 { NULL, 0 }, 1073 { atom_op_move, ATOM_ARG_REG }, 1074 { atom_op_move, ATOM_ARG_PS }, 1075 { atom_op_move, ATOM_ARG_WS }, 1076 { atom_op_move, ATOM_ARG_FB }, 1077 { atom_op_move, ATOM_ARG_PLL }, 1078 { atom_op_move, ATOM_ARG_MC }, 1079 { atom_op_and, ATOM_ARG_REG }, 1080 { atom_op_and, ATOM_ARG_PS }, 1081 { atom_op_and, ATOM_ARG_WS }, 1082 { atom_op_and, ATOM_ARG_FB }, 1083 { atom_op_and, ATOM_ARG_PLL }, 1084 { atom_op_and, ATOM_ARG_MC }, 1085 { atom_op_or, ATOM_ARG_REG }, 1086 { atom_op_or, ATOM_ARG_PS }, 1087 { atom_op_or, ATOM_ARG_WS }, 1088 { atom_op_or, ATOM_ARG_FB }, 1089 { atom_op_or, ATOM_ARG_PLL }, 1090 { atom_op_or, ATOM_ARG_MC }, 1091 { atom_op_shift_left, ATOM_ARG_REG }, 1092 { atom_op_shift_left, ATOM_ARG_PS }, 1093 { atom_op_shift_left, ATOM_ARG_WS }, 1094 { atom_op_shift_left, ATOM_ARG_FB }, 1095 { atom_op_shift_left, ATOM_ARG_PLL }, 1096 { atom_op_shift_left, ATOM_ARG_MC }, 1097 { atom_op_shift_right, ATOM_ARG_REG }, 1098 { atom_op_shift_right, ATOM_ARG_PS }, 1099 { atom_op_shift_right, ATOM_ARG_WS }, 1100 { atom_op_shift_right, ATOM_ARG_FB }, 1101 { atom_op_shift_right, ATOM_ARG_PLL }, 1102 { atom_op_shift_right, ATOM_ARG_MC }, 1103 { atom_op_mul, ATOM_ARG_REG }, 1104 { atom_op_mul, ATOM_ARG_PS }, 1105 { atom_op_mul, ATOM_ARG_WS }, 1106 { atom_op_mul, ATOM_ARG_FB }, 1107 { atom_op_mul, ATOM_ARG_PLL }, 1108 { atom_op_mul, ATOM_ARG_MC }, 1109 { atom_op_div, ATOM_ARG_REG }, 1110 { atom_op_div, ATOM_ARG_PS }, 1111 { atom_op_div, ATOM_ARG_WS }, 1112 { atom_op_div, ATOM_ARG_FB }, 1113 { atom_op_div, ATOM_ARG_PLL }, 1114 { atom_op_div, ATOM_ARG_MC }, 1115 { atom_op_add, ATOM_ARG_REG }, 1116 { atom_op_add, ATOM_ARG_PS }, 1117 { atom_op_add, ATOM_ARG_WS }, 1118 { atom_op_add, ATOM_ARG_FB }, 1119 { atom_op_add, ATOM_ARG_PLL }, 1120 { atom_op_add, ATOM_ARG_MC }, 1121 { atom_op_sub, ATOM_ARG_REG }, 1122 { atom_op_sub, ATOM_ARG_PS }, 1123 { atom_op_sub, ATOM_ARG_WS }, 1124 { atom_op_sub, ATOM_ARG_FB }, 1125 { atom_op_sub, ATOM_ARG_PLL }, 1126 { atom_op_sub, ATOM_ARG_MC }, 1127 { atom_op_setport, ATOM_PORT_ATI }, 1128 { atom_op_setport, ATOM_PORT_PCI }, 1129 { atom_op_setport, ATOM_PORT_SYSIO }, 1130 { atom_op_setregblock, 0 }, 1131 { atom_op_setfbbase, 0 }, 1132 { atom_op_compare, ATOM_ARG_REG }, 1133 { atom_op_compare, ATOM_ARG_PS }, 1134 { atom_op_compare, ATOM_ARG_WS }, 1135 { atom_op_compare, ATOM_ARG_FB }, 1136 { atom_op_compare, ATOM_ARG_PLL }, 1137 { atom_op_compare, ATOM_ARG_MC }, 1138 { atom_op_switch, 0 }, 1139 { atom_op_jump, ATOM_COND_ALWAYS }, 1140 { atom_op_jump, ATOM_COND_EQUAL }, 1141 { atom_op_jump, ATOM_COND_BELOW }, 1142 { atom_op_jump, ATOM_COND_ABOVE }, 1143 { atom_op_jump, ATOM_COND_BELOWOREQUAL }, 1144 { atom_op_jump, ATOM_COND_ABOVEOREQUAL }, 1145 { atom_op_jump, ATOM_COND_NOTEQUAL }, 1146 { atom_op_test, ATOM_ARG_REG }, 1147 { atom_op_test, ATOM_ARG_PS }, 1148 { atom_op_test, ATOM_ARG_WS }, 1149 { atom_op_test, ATOM_ARG_FB }, 1150 { atom_op_test, ATOM_ARG_PLL }, 1151 { atom_op_test, ATOM_ARG_MC }, 1152 { atom_op_delay, ATOM_UNIT_MILLISEC }, 1153 { atom_op_delay, ATOM_UNIT_MICROSEC }, 1154 { atom_op_calltable, 0 }, 1155 { atom_op_repeat, 0 }, 1156 { atom_op_clear, ATOM_ARG_REG }, 1157 { atom_op_clear, ATOM_ARG_PS }, 1158 { atom_op_clear, ATOM_ARG_WS }, 1159 { atom_op_clear, ATOM_ARG_FB }, 1160 { atom_op_clear, ATOM_ARG_PLL }, 1161 { atom_op_clear, ATOM_ARG_MC }, 1162 { atom_op_nop, 0 }, 1163 { atom_op_eot, 0 }, 1164 { atom_op_mask, ATOM_ARG_REG }, 1165 { atom_op_mask, ATOM_ARG_PS }, 1166 { atom_op_mask, ATOM_ARG_WS }, 1167 { atom_op_mask, ATOM_ARG_FB }, 1168 { atom_op_mask, ATOM_ARG_PLL }, 1169 { atom_op_mask, ATOM_ARG_MC }, 1170 { atom_op_postcard, 0 }, 1171 { atom_op_beep, 0 }, 1172 { atom_op_savereg, 0 }, 1173 { atom_op_restorereg, 0 }, 1174 { atom_op_setdatablock, 0 }, 1175 { atom_op_xor, ATOM_ARG_REG }, 1176 { atom_op_xor, ATOM_ARG_PS }, 1177 { atom_op_xor, ATOM_ARG_WS }, 1178 { atom_op_xor, ATOM_ARG_FB }, 1179 { atom_op_xor, ATOM_ARG_PLL }, 1180 { atom_op_xor, ATOM_ARG_MC }, 1181 { atom_op_shl, ATOM_ARG_REG }, 1182 { atom_op_shl, ATOM_ARG_PS }, 1183 { atom_op_shl, ATOM_ARG_WS }, 1184 { atom_op_shl, ATOM_ARG_FB }, 1185 { atom_op_shl, ATOM_ARG_PLL }, 1186 { atom_op_shl, ATOM_ARG_MC }, 1187 { atom_op_shr, ATOM_ARG_REG }, 1188 { atom_op_shr, ATOM_ARG_PS }, 1189 { atom_op_shr, ATOM_ARG_WS }, 1190 { atom_op_shr, ATOM_ARG_FB }, 1191 { atom_op_shr, ATOM_ARG_PLL }, 1192 { atom_op_shr, ATOM_ARG_MC }, 1193 { atom_op_debug, 0 }, 1194 { atom_op_processds, 0}, 1195 { atom_op_mul32, ATOM_ARG_PS}, 1196 { atom_op_mul32, ATOM_ARG_WS}, 1197 { atom_op_div32, ATOM_ARG_PS}, 1198 { atom_op_div32, ATOM_ARG_WS} 1199 }; 1200 1201 1202 status_t 1203 atom_execute_table_locked(atom_context *ctx, int index, uint32 * params) 1204 { 1205 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1206 int len, ws, ps, ptr; 1207 unsigned char op; 1208 atom_exec_context ectx; 1209 1210 if (!base) { 1211 ERROR("%s: BUG: Table called doesn't exist in AtomBIOS!\n", __func__); 1212 return B_ERROR; 1213 } 1214 1215 len = CU16(base + ATOM_CT_SIZE_PTR); 1216 ws = CU8(base + ATOM_CT_WS_PTR); 1217 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; 1218 ptr = base + ATOM_CT_CODE_PTR; 1219 1220 ectx.ctx = ctx; 1221 ectx.ps_shift = ps / 4; 1222 ectx.start = base; 1223 ectx.ps = params; 1224 ectx.abort = false; 1225 ectx.lastJump = 0; 1226 ectx.lastJumpCount = 0; 1227 ectx.jumpStart = 0; 1228 if (ws) 1229 ectx.ws = (uint32*)malloc(4 * ws); 1230 else 1231 ectx.ws = NULL; 1232 1233 debug_depth++; 1234 while (1) { 1235 op = CU8(ptr++); 1236 const char* operationName; 1237 1238 if (op < ATOM_OP_NAMES_CNT) 1239 operationName = atom_op_names[op]; 1240 else 1241 operationName = "UNKNOWN"; 1242 1243 TRACE("%s: %s @ 0x%" B_PRIX16 "\n", __func__, operationName, ptr - 1); 1244 1245 if (ectx.abort == true) { 1246 ERROR("%s: AtomBIOS parser aborted calling operation %s" 1247 " (0x%" B_PRIX8 ") @ 0x%" B_PRIX16 "\n", __func__, 1248 operationName, op, ptr - 1); 1249 free(ectx.ws); 1250 return B_ERROR; 1251 } 1252 1253 if (op < ATOM_OP_CNT && op > 0) 1254 opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg); 1255 else 1256 break; 1257 1258 if (op == ATOM_OP_EOT) 1259 break; 1260 } 1261 debug_depth--; 1262 1263 free(ectx.ws); 1264 return B_OK; 1265 } 1266 1267 1268 status_t 1269 atom_execute_table(atom_context *ctx, int index, uint32 *params) 1270 { 1271 if (acquire_sem_etc(ctx->exec_sem, 1, B_RELATIVE_TIMEOUT, 5000000) 1272 != B_NO_ERROR) { 1273 ERROR("%s: Timeout to obtain semaphore!\n", __func__); 1274 return B_ERROR; 1275 } 1276 /* reset data block */ 1277 ctx->data_block = 0; 1278 /* reset reg block */ 1279 ctx->reg_block = 0; 1280 /* reset fb window */ 1281 ctx->fb_base = 0; 1282 /* reset io mode */ 1283 ctx->io_mode = ATOM_IO_MM; 1284 /* reset divmul */ 1285 ctx->divmul[0] = 0; 1286 ctx->divmul[1] = 0; 1287 status_t result = atom_execute_table_locked(ctx, index, params); 1288 if (result != B_OK) { 1289 const char* tableName; 1290 if (index < ATOM_TABLE_NAMES_CNT) 1291 tableName = atom_table_names[index]; 1292 else 1293 tableName = "Unknown"; 1294 1295 ERROR("%s: AtomBIOS parser was aborted in table %s (0x%" B_PRIX8 ")\n", 1296 __func__, tableName, index); 1297 } 1298 1299 release_sem(ctx->exec_sem); 1300 return result; 1301 } 1302 1303 1304 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1305 1306 1307 static void 1308 atom_index_iio(atom_context *ctx, int base) 1309 { 1310 ctx->iio = (uint16*)malloc(2 * 256); 1311 while (CU8(base) == ATOM_IIO_START) { 1312 ctx->iio[CU8(base + 1)] = base + 2; 1313 base += 2; 1314 while (CU8(base) != ATOM_IIO_END) 1315 base += atom_iio_len[CU8(base)]; 1316 base += 3; 1317 } 1318 } 1319 1320 1321 atom_context* 1322 atom_parse(card_info *card, uint8 *bios) 1323 { 1324 atom_context *ctx = (atom_context*)malloc(sizeof(atom_context)); 1325 1326 if (ctx == NULL) { 1327 ERROR("%s: Error: No memory for atom_context mapping\n", __func__); 1328 return NULL; 1329 } 1330 1331 ctx->card = card; 1332 ctx->bios = bios; 1333 1334 if (CU16(0) != ATOM_BIOS_MAGIC) { 1335 ERROR("Invalid BIOS magic.\n"); 1336 free(ctx); 1337 return NULL; 1338 } 1339 if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, 1340 strlen(ATOM_ATI_MAGIC))) { 1341 ERROR("Invalid ATI magic.\n"); 1342 free(ctx); 1343 return NULL; 1344 } 1345 1346 int base = CU16(ATOM_ROM_TABLE_PTR); 1347 if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, 1348 strlen(ATOM_ROM_MAGIC))) { 1349 ERROR("Invalid ATOM magic.\n"); 1350 free(ctx); 1351 return NULL; 1352 } 1353 1354 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); 1355 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); 1356 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); 1357 1358 char *str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); 1359 while (*str && ((*str == '\n') || (*str == '\r'))) 1360 str++; 1361 1362 int i; 1363 char name[512]; 1364 // Terminate bios string if not 0 terminated 1365 for (i = 0; i < 511; i++) { 1366 name[i] = str[i]; 1367 if (name[i] < '.' || name[i] > 'z') { 1368 name[i] = 0; 1369 break; 1370 } 1371 } 1372 1373 TRACE("ATOM BIOS: %s", name); 1374 1375 return ctx; 1376 } 1377 1378 1379 status_t 1380 atom_asic_init(atom_context *ctx) 1381 { 1382 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); 1383 uint32 ps[16]; 1384 memset(ps, 0, 64); 1385 1386 ps[0] = B_HOST_TO_LENDIAN_INT32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); 1387 ps[1] = B_HOST_TO_LENDIAN_INT32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); 1388 if (!ps[0] || !ps[1]) 1389 return B_ERROR; 1390 1391 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1392 return B_ERROR; 1393 1394 return atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1395 } 1396 1397 1398 void 1399 atom_destroy(atom_context *ctx) 1400 { 1401 if (ctx != NULL) { 1402 free(ctx->iio); 1403 free(ctx->scratch); 1404 delete_sem(ctx->exec_sem); 1405 } 1406 1407 free(ctx); 1408 } 1409 1410 1411 status_t 1412 atom_parse_data_header(atom_context *ctx, int index, uint16 *size, 1413 uint8 *frev, uint8 *crev, uint16 *data_start) 1414 { 1415 int offset = index * 2 + 4; 1416 int idx = CU16(ctx->data_table + offset); 1417 uint16 *mdt = (uint16*)(ctx->bios + ctx->data_table + 4); 1418 1419 if (!mdt[index]) 1420 return B_ERROR; 1421 1422 if (size) 1423 *size = CU16(idx); 1424 if (frev) 1425 *frev = CU8(idx + 2); 1426 if (crev) 1427 *crev = CU8(idx + 3); 1428 *data_start = idx; 1429 return B_OK; 1430 } 1431 1432 1433 status_t 1434 atom_parse_cmd_header(atom_context *ctx, int index, uint8 * frev, 1435 uint8 * crev) 1436 { 1437 int offset = index * 2 + 4; 1438 int idx = CU16(ctx->cmd_table + offset); 1439 uint16 *mct = (uint16*)(ctx->bios + ctx->cmd_table + 4); 1440 1441 if (!mct[index]) 1442 return B_ERROR; 1443 1444 if (frev) 1445 *frev = CU8(idx + 2); 1446 if (crev) 1447 *crev = CU8(idx + 3); 1448 return B_OK; 1449 } 1450 1451 1452 status_t 1453 atom_allocate_fb_scratch(atom_context *ctx) 1454 { 1455 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); 1456 uint16 data_offset; 1457 int usage_bytes = 0; 1458 _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware; 1459 1460 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset) 1461 == B_OK) { 1462 firmware = (_ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1463 1464 _ATOM_FIRMWARE_VRAM_RESERVE_INFO *reserveInfo 1465 = &firmware->asFirmwareVramReserveInfo[0]; 1466 1467 TRACE("Atom firmware requested 0x%" B_PRIX32 " %" B_PRIu16 "kb\n", 1468 B_LENDIAN_TO_HOST_INT32(reserveInfo->ulStartAddrUsedByFirmware), 1469 B_LENDIAN_TO_HOST_INT16(reserveInfo->usFirmwareUseInKb)); 1470 1471 usage_bytes 1472 = B_LENDIAN_TO_HOST_INT16(reserveInfo->usFirmwareUseInKb) * 1024; 1473 } 1474 ctx->scratch_size_bytes = 0; 1475 if (usage_bytes == 0) 1476 usage_bytes = 20 * 1024; 1477 /* allocate some scratch memory */ 1478 ctx->scratch = (uint32*)malloc(usage_bytes); 1479 if (!ctx->scratch) 1480 return B_NO_MEMORY; 1481 1482 ctx->scratch_size_bytes = usage_bytes; 1483 return B_OK; 1484 } 1485