1 /* 2 * Copyright 2008 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 * 22 * Author: Stanislaw Skowronek 23 */ 24 25 /* Rewritten for the Haiku Operating System Radeon HD driver 26 * Author: 27 * Alexander von Gluck, kallisti5@unixzen.com 28 */ 29 30 31 #include <Debug.h> 32 33 #include "atom.h" 34 #include "atom-names.h" 35 #include "atom-bits.h" 36 37 38 /* AtomBIOS loop detection 39 * Number of repeat AtomBIOS jmp operations 40 * before bailing due to stuck in a loop 41 */ 42 #define ATOM_OP_JMP_TIMEOUT 128 43 44 // *** Tracing 45 #undef TRACE 46 //#define TRACE_ATOM 47 #ifdef TRACE_ATOM 48 # define TRACE(x...) _sPrintf("radeon_hd: " x) 49 #else 50 # define TRACE(x...) ; 51 #endif 52 53 #define ERROR(x...) _sPrintf("radeon_hd: " x) 54 55 #define ATOM_COND_ABOVE 0 56 #define ATOM_COND_ABOVEOREQUAL 1 57 #define ATOM_COND_ALWAYS 2 58 #define ATOM_COND_BELOW 3 59 #define ATOM_COND_BELOWOREQUAL 4 60 #define ATOM_COND_EQUAL 5 61 #define ATOM_COND_NOTEQUAL 6 62 63 #define ATOM_PORT_ATI 0 64 #define ATOM_PORT_PCI 1 65 #define ATOM_PORT_SYSIO 2 66 67 #define ATOM_UNIT_MICROSEC 0 68 #define ATOM_UNIT_MILLISEC 1 69 70 #define PLL_INDEX 2 71 #define PLL_DATA 3 72 73 74 typedef struct { 75 atom_context *ctx; 76 77 uint32 *ps, *ws; 78 int ps_shift; 79 uint16 start; 80 uint16 last_jump; 81 uint16 last_jump_count; 82 bool abort; 83 } atom_exec_context; 84 85 int atom_debug = 0; 86 status_t atom_execute_table_locked(atom_context *ctx, 87 int index, uint32 *params); 88 status_t atom_execute_table(atom_context *ctx, int index, uint32 *params); 89 90 static uint32 atom_arg_mask[8] = {0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 91 0xFF, 0xFF00, 0xFF0000, 0xFF000000}; 92 static int atom_arg_shift[8] = {0, 0, 8, 16, 0, 8, 16, 24}; 93 static int atom_dst_to_src[8][4] = { 94 // translate destination alignment field to the source alignment encoding 95 { 0, 0, 0, 0 }, 96 { 1, 2, 3, 0 }, 97 { 1, 2, 3, 0 }, 98 { 1, 2, 3, 0 }, 99 { 4, 5, 6, 7 }, 100 { 4, 5, 6, 7 }, 101 { 4, 5, 6, 7 }, 102 { 4, 5, 6, 7 }, 103 }; 104 static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 }; 105 106 static int debug_depth = 0; 107 108 static uint32 109 atom_iio_execute(atom_context *ctx, int base, uint32 index, uint32 data) 110 { 111 uint32 temp = 0xCDCDCDCD; 112 while (1) 113 switch(CU8(base)) { 114 case ATOM_IIO_NOP: 115 base++; 116 break; 117 case ATOM_IIO_READ: 118 temp = ctx->card->ioreg_read(CU16(base + 1)); 119 base += 3; 120 break; 121 case ATOM_IIO_WRITE: 122 (void)ctx->card->reg_read(CU16(base + 1)); 123 ctx->card->ioreg_write(CU16(base + 1), temp); 124 base += 3; 125 break; 126 case ATOM_IIO_CLEAR: 127 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2)); 128 base += 3; 129 break; 130 case ATOM_IIO_SET: 131 temp |= (0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 2); 132 base += 3; 133 break; 134 case ATOM_IIO_MOVE_INDEX: 135 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 136 temp |= ((index >> CU8(base + 2)) 137 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 138 base += 4; 139 break; 140 case ATOM_IIO_MOVE_DATA: 141 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 142 temp |= ((data >> CU8(base + 2)) 143 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 144 base += 4; 145 break; 146 case ATOM_IIO_MOVE_ATTR: 147 temp &= ~((0xFFFFFFFF >> (32 - CU8(base + 1))) << CU8(base + 3)); 148 temp |= ((ctx->io_attr >> CU8(base + 2)) 149 & (0xFFFFFFFF >> (32 - CU8(base + 1)))) << CU8(base + 3); 150 base += 4; 151 break; 152 case ATOM_IIO_END: 153 return temp; 154 default: 155 TRACE("%s: Unknown IIO opcode.\n", __func__); 156 return 0; 157 } 158 } 159 160 161 static uint32 162 atom_get_src_int(atom_exec_context *ctx, uint8 attr, int *ptr, 163 uint32 *saved, int print) 164 { 165 uint32 idx, val = 0xCDCDCDCD, align, arg; 166 atom_context *gctx = ctx->ctx; 167 arg = attr & 7; 168 align = (attr >> 3) & 7; 169 switch(arg) { 170 case ATOM_ARG_REG: 171 idx = U16(*ptr); 172 (*ptr)+=2; 173 idx += gctx->reg_block; 174 switch(gctx->io_mode) { 175 case ATOM_IO_MM: 176 val = gctx->card->reg_read(idx); 177 break; 178 case ATOM_IO_PCI: 179 TRACE("%s: PCI registers are not implemented.\n", __func__); 180 return 0; 181 case ATOM_IO_SYSIO: 182 TRACE("%s: SYSIO registers are not implemented.\n", 183 __func__); 184 return 0; 185 default: 186 if (!(gctx->io_mode & 0x80)) { 187 TRACE("%s: Bad IO mode.\n", __func__); 188 return 0; 189 } 190 if (!gctx->iio[gctx->io_mode & 0x7F]) { 191 TRACE("%s: Undefined indirect IO read method %d.\n", 192 __func__, gctx->io_mode & 0x7F); 193 return 0; 194 } 195 val = atom_iio_execute(gctx, 196 gctx->iio[gctx->io_mode & 0x7F], idx, 0); 197 } 198 break; 199 case ATOM_ARG_PS: 200 idx = U8(*ptr); 201 (*ptr)++; 202 val = B_LENDIAN_TO_HOST_INT32(ctx->ps[idx]); 203 // TODO : val = get_unaligned_le32((u32 *)&ctx->ps[idx]); 204 break; 205 case ATOM_ARG_WS: 206 idx = U8(*ptr); 207 (*ptr)++; 208 switch(idx) { 209 case ATOM_WS_QUOTIENT: 210 val = gctx->divmul[0]; 211 break; 212 case ATOM_WS_REMAINDER: 213 val = gctx->divmul[1]; 214 break; 215 case ATOM_WS_DATAPTR: 216 val = gctx->data_block; 217 break; 218 case ATOM_WS_SHIFT: 219 val = gctx->shift; 220 break; 221 case ATOM_WS_OR_MASK: 222 val = 1 << gctx->shift; 223 break; 224 case ATOM_WS_AND_MASK: 225 val = ~(1 << gctx->shift); 226 break; 227 case ATOM_WS_FB_WINDOW: 228 val = gctx->fb_base; 229 break; 230 case ATOM_WS_ATTRIBUTES: 231 val = gctx->io_attr; 232 break; 233 case ATOM_WS_REGPTR: 234 val = gctx->reg_block; 235 break; 236 default: 237 val = ctx->ws[idx]; 238 } 239 break; 240 case ATOM_ARG_ID: 241 idx = U16(*ptr); 242 (*ptr) += 2; 243 val = U32(idx + gctx->data_block); 244 break; 245 case ATOM_ARG_FB: 246 idx = U8(*ptr); 247 (*ptr)++; 248 val = gctx->scratch[((gctx->fb_base + idx) / 4)]; 249 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 250 ERROR("%s: fb tried to read beyond scratch region!" 251 " %" B_PRIu32 " vs. %" B_PRIu32 "\n", __func__, 252 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 253 val = 0; 254 } else 255 val = gctx->scratch[(gctx->fb_base / 4) + idx]; 256 break; 257 case ATOM_ARG_IMM: 258 switch(align) { 259 case ATOM_SRC_DWORD: 260 val = U32(*ptr); 261 (*ptr)+=4; 262 return val; 263 case ATOM_SRC_WORD0: 264 case ATOM_SRC_WORD8: 265 case ATOM_SRC_WORD16: 266 val = U16(*ptr); 267 (*ptr) += 2; 268 return val; 269 case ATOM_SRC_BYTE0: 270 case ATOM_SRC_BYTE8: 271 case ATOM_SRC_BYTE16: 272 case ATOM_SRC_BYTE24: 273 val = U8(*ptr); 274 (*ptr)++; 275 return val; 276 } 277 return 0; 278 case ATOM_ARG_PLL: 279 idx = U8(*ptr); 280 (*ptr)++; 281 val = gctx->card->pll_read(idx); 282 break; 283 case ATOM_ARG_MC: 284 idx = U8(*ptr); 285 (*ptr)++; 286 val = gctx->card->mc_read(idx); 287 return 0; 288 } 289 if (saved) 290 *saved = val; 291 val &= atom_arg_mask[align]; 292 val >>= atom_arg_shift[align]; 293 return val; 294 } 295 296 297 static void 298 atom_skip_src_int(atom_exec_context *ctx, uint8 attr, int *ptr) 299 { 300 uint32 align = (attr >> 3) & 7, arg = attr & 7; 301 switch(arg) { 302 case ATOM_ARG_REG: 303 case ATOM_ARG_ID: 304 (*ptr) += 2; 305 break; 306 case ATOM_ARG_PLL: 307 case ATOM_ARG_MC: 308 case ATOM_ARG_PS: 309 case ATOM_ARG_WS: 310 case ATOM_ARG_FB: 311 (*ptr)++; 312 break; 313 case ATOM_ARG_IMM: 314 switch(align) { 315 case ATOM_SRC_DWORD: 316 (*ptr) += 4; 317 return; 318 case ATOM_SRC_WORD0: 319 case ATOM_SRC_WORD8: 320 case ATOM_SRC_WORD16: 321 (*ptr) += 2; 322 return; 323 case ATOM_SRC_BYTE0: 324 case ATOM_SRC_BYTE8: 325 case ATOM_SRC_BYTE16: 326 case ATOM_SRC_BYTE24: 327 (*ptr)++; 328 return; 329 } 330 return; 331 } 332 } 333 334 335 static uint32 336 atom_get_src(atom_exec_context *ctx, uint8 attr, int *ptr) 337 { 338 return atom_get_src_int(ctx, attr, ptr, NULL, 1); 339 } 340 341 342 static uint32 343 atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) 344 { 345 uint32 val = 0xCDCDCDCD; 346 347 switch (align) { 348 case ATOM_SRC_DWORD: 349 val = U32(*ptr); 350 (*ptr) += 4; 351 break; 352 case ATOM_SRC_WORD0: 353 case ATOM_SRC_WORD8: 354 case ATOM_SRC_WORD16: 355 val = U16(*ptr); 356 (*ptr) += 2; 357 break; 358 case ATOM_SRC_BYTE0: 359 case ATOM_SRC_BYTE8: 360 case ATOM_SRC_BYTE16: 361 case ATOM_SRC_BYTE24: 362 val = U8(*ptr); 363 (*ptr)++; 364 break; 365 } 366 return val; 367 } 368 369 370 static uint32 371 atom_get_dst(atom_exec_context *ctx, int arg, uint8 attr, 372 int *ptr, uint32 *saved, int print) 373 { 374 return atom_get_src_int(ctx, 375 arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr, saved, print); 376 } 377 378 379 static void 380 atom_skip_dst(atom_exec_context *ctx, int arg, uint8 attr, int *ptr) 381 { 382 atom_skip_src_int(ctx, 383 arg|atom_dst_to_src[(attr>>3)&7][(attr>>6)&3]<<3, ptr); 384 } 385 386 387 static void 388 atom_put_dst(atom_exec_context *ctx, int arg, uint8 attr, 389 int *ptr, uint32 val, uint32 saved) 390 { 391 uint32 align = atom_dst_to_src[(attr>>3)&7][(attr>>6)&3], 392 old_val = val, idx; 393 atom_context *gctx = ctx->ctx; 394 old_val &= atom_arg_mask[align] >> atom_arg_shift[align]; 395 val <<= atom_arg_shift[align]; 396 val &= atom_arg_mask[align]; 397 saved &= ~atom_arg_mask[align]; 398 val |= saved; 399 switch(arg) { 400 case ATOM_ARG_REG: 401 idx = U16(*ptr); 402 (*ptr) += 2; 403 idx += gctx->reg_block; 404 switch(gctx->io_mode) { 405 case ATOM_IO_MM: 406 if (idx == 0) 407 gctx->card->reg_write(idx, val << 2); 408 else 409 gctx->card->reg_write(idx, val); 410 break; 411 case ATOM_IO_PCI: 412 TRACE("%s: PCI registers are not implemented.\n", 413 __func__); 414 return; 415 case ATOM_IO_SYSIO: 416 TRACE("%s: SYSIO registers are not implemented.\n", 417 __func__); 418 return; 419 default: 420 if (!(gctx->io_mode & 0x80)) { 421 TRACE("%s: Bad IO mode.\n", __func__); 422 return; 423 } 424 if (!gctx->iio[gctx->io_mode & 0xFF]) { 425 TRACE("%s: Undefined indirect IO write method %d\n", 426 __func__, gctx->io_mode & 0x7F); 427 return; 428 } 429 atom_iio_execute(gctx, gctx->iio[gctx->io_mode&0xFF], 430 idx, val); 431 } 432 break; 433 case ATOM_ARG_PS: 434 idx = U8(*ptr); 435 (*ptr)++; 436 ctx->ps[idx] = B_HOST_TO_LENDIAN_INT32(val); 437 break; 438 case ATOM_ARG_WS: 439 idx = U8(*ptr); 440 (*ptr)++; 441 switch(idx) { 442 case ATOM_WS_QUOTIENT: 443 gctx->divmul[0] = val; 444 break; 445 case ATOM_WS_REMAINDER: 446 gctx->divmul[1] = val; 447 break; 448 case ATOM_WS_DATAPTR: 449 gctx->data_block = val; 450 break; 451 case ATOM_WS_SHIFT: 452 gctx->shift = val; 453 break; 454 case ATOM_WS_OR_MASK: 455 case ATOM_WS_AND_MASK: 456 break; 457 case ATOM_WS_FB_WINDOW: 458 gctx->fb_base = val; 459 break; 460 case ATOM_WS_ATTRIBUTES: 461 gctx->io_attr = val; 462 break; 463 case ATOM_WS_REGPTR: 464 gctx->reg_block = val; 465 break; 466 default: 467 ctx->ws[idx] = val; 468 } 469 break; 470 case ATOM_ARG_FB: 471 idx = U8(*ptr); 472 (*ptr)++; 473 if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { 474 ERROR("%s: fb tried to write beyond scratch region! " 475 "%" B_PRIu32 " vs. %" B_PRIu32 "\n", __func__, 476 gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); 477 } else 478 gctx->scratch[(gctx->fb_base / 4) + idx] = val; 479 break; 480 case ATOM_ARG_PLL: 481 idx = U8(*ptr); 482 (*ptr)++; 483 gctx->card->pll_write(idx, val); 484 break; 485 case ATOM_ARG_MC: 486 idx = U8(*ptr); 487 (*ptr)++; 488 gctx->card->mc_write(idx, val); 489 return; 490 } 491 } 492 493 494 static void 495 atom_op_add(atom_exec_context *ctx, int *ptr, int arg) 496 { 497 uint8 attr = U8((*ptr)++); 498 uint32 dst, src, saved; 499 int dptr = *ptr; 500 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 501 src = atom_get_src(ctx, attr, ptr); 502 #ifdef TRACE_ATOM 503 TRACE("%s: 0x%" B_PRIX32 " + 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 504 __func__, dst, src, dst + src); 505 #endif 506 dst += src; 507 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 508 } 509 510 511 static void 512 atom_op_and(atom_exec_context *ctx, int *ptr, int arg) 513 { 514 uint8 attr = U8((*ptr)++); 515 uint32 dst, src, saved; 516 int dptr = *ptr; 517 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 518 src = atom_get_src(ctx, attr, ptr); 519 #ifdef TRACE_ATOM 520 TRACE("%s: 0x%" B_PRIX32 " & 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 521 __func__, src, dst, dst & src); 522 #endif 523 dst &= src; 524 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 525 } 526 527 528 static void 529 atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) 530 { 531 TRACE("%s: Quack!\n", __func__); 532 } 533 534 535 static void 536 atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 537 { 538 int idx = U8((*ptr)++); 539 status_t result = B_OK; 540 541 if (idx < ATOM_TABLE_NAMES_CNT) { 542 TRACE("%s: table: %s (%d)\n", __func__, atom_table_names[idx], idx); 543 } else { 544 TRACE("%s: table: unknown (%d)\n", __func__, idx); 545 } 546 547 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) { 548 result = atom_execute_table_locked(ctx->ctx, 549 idx, ctx->ps + ctx->ps_shift); 550 } 551 552 if (result != B_OK) 553 ctx->abort = true; 554 } 555 556 557 static void 558 atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 559 { 560 uint8 attr = U8((*ptr)++); 561 uint32 saved; 562 int dptr = *ptr; 563 attr &= 0x38; 564 attr |= atom_def_dst[attr>>3]<<6; 565 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 566 TRACE("%s\n", __func__); 567 atom_put_dst(ctx, arg, attr, &dptr, 0, saved); 568 } 569 570 571 static void 572 atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) 573 { 574 uint8 attr = U8((*ptr)++); 575 uint32 dst, src; 576 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 577 src = atom_get_src(ctx, attr, ptr); 578 ctx->ctx->cs_equal = (dst == src); 579 ctx->ctx->cs_above = (dst > src); 580 TRACE("%s: 0x%" B_PRIX32 " %s 0x%" B_PRIX32 "\n", __func__, 581 dst, ctx->ctx->cs_above ? ">" : "<=", src); 582 } 583 584 585 static void 586 atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) 587 { 588 bigtime_t count = U8((*ptr)++); 589 if (arg == ATOM_UNIT_MICROSEC) { 590 TRACE("%s: %" B_PRId64 " microseconds\n", __func__, count); 591 // Microseconds 592 snooze(count); 593 } else { 594 TRACE("%s: %" B_PRId64 " milliseconds\n", __func__, count); 595 // Milliseconds 596 snooze(count * 1000); 597 } 598 } 599 600 601 static void 602 atom_op_div(atom_exec_context *ctx, int *ptr, int arg) 603 { 604 uint8 attr = U8((*ptr)++); 605 uint32 dst, src; 606 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 607 src = atom_get_src(ctx, attr, ptr); 608 if (src != 0) { 609 ctx->ctx->divmul[0] = dst / src; 610 ctx->ctx->divmul[1] = dst%src; 611 } else { 612 ctx->ctx->divmul[0] = 0; 613 ctx->ctx->divmul[1] = 0; 614 } 615 #ifdef ATOM_TRACE 616 TRACE("%s: 0x%" B_PRIX32 " / 0x%" B_PRIX32 " is 0x%" B_PRIX32 617 " remander 0x%" B_PRIX32 "\n", __func__, dst, src, 618 ctx->ctx->divmul[0], ctx->ctx->divmul[1]); 619 #endif 620 } 621 622 623 static void 624 atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) 625 { 626 /* functionally, a nop */ 627 } 628 629 630 static void 631 atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 632 { 633 int execute = 0, target = U16(*ptr); 634 (*ptr) += 2; 635 switch(arg) { 636 case ATOM_COND_ABOVE: 637 execute = ctx->ctx->cs_above; 638 break; 639 case ATOM_COND_ABOVEOREQUAL: 640 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; 641 break; 642 case ATOM_COND_ALWAYS: 643 execute = 1; 644 break; 645 case ATOM_COND_BELOW: 646 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); 647 break; 648 case ATOM_COND_BELOWOREQUAL: 649 execute = !ctx->ctx->cs_above; 650 break; 651 case ATOM_COND_EQUAL: 652 execute = ctx->ctx->cs_equal; 653 break; 654 case ATOM_COND_NOTEQUAL: 655 execute = !ctx->ctx->cs_equal; 656 break; 657 } 658 TRACE("%s: execute jump: %s; target: 0x%04X\n", __func__, 659 execute? "yes" : "no", target); 660 661 if (execute) { 662 if (ctx->last_jump == (ctx->start + target)) { 663 if (ctx->last_jump_count > ATOM_OP_JMP_TIMEOUT) { 664 ERROR("%s: DANGER! AtomBIOS stuck in loop" 665 " for more then %d jumps... abort!\n", 666 __func__, ATOM_OP_JMP_TIMEOUT); 667 ctx->abort = true; 668 } else { 669 ctx->last_jump_count++; 670 } 671 } else { 672 ctx->last_jump = ctx->start + target; 673 ctx->last_jump_count = 1; 674 } 675 *ptr = ctx->start + target; 676 } 677 } 678 679 680 static void 681 atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 682 { 683 uint8 attr = U8((*ptr)++); 684 uint32 dst, mask, src, saved; 685 int dptr = *ptr; 686 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 687 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); 688 src = atom_get_src(ctx, attr, ptr); 689 dst &= mask; 690 dst |= src; 691 TRACE("%s: src: 0x%" B_PRIX32 " mask 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 692 __func__, src, mask, dst); 693 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 694 } 695 696 697 static void 698 atom_op_move(atom_exec_context *ctx, int *ptr, int arg) 699 { 700 uint8 attr = U8((*ptr)++); 701 uint32 src, saved; 702 int dptr = *ptr; 703 if (((attr >> 3) & 7) != ATOM_SRC_DWORD) 704 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); 705 else { 706 atom_skip_dst(ctx, arg, attr, ptr); 707 saved = 0xCDCDCDCD; 708 } 709 src = atom_get_src(ctx, attr, ptr); 710 TRACE("%s: src: 0x%" B_PRIX32 "; saved: 0x%" B_PRIX32 "\n", 711 __func__, src, saved); 712 atom_put_dst(ctx, arg, attr, &dptr, src, saved); 713 } 714 715 716 static void 717 atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) 718 { 719 uint8 attr = U8((*ptr)++); 720 uint32 dst, src; 721 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 722 src = atom_get_src(ctx, attr, ptr); 723 ctx->ctx->divmul[0] = dst * src; 724 TRACE("%s: 0x%" B_PRIX32 " * 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 725 __func__, dst, src, ctx->ctx->divmul[0]); 726 } 727 728 729 static void 730 atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) 731 { 732 /* nothing */ 733 } 734 735 736 static void 737 atom_op_or(atom_exec_context *ctx, int *ptr, int arg) 738 { 739 uint8 attr = U8((*ptr)++); 740 uint32 dst, src, saved; 741 int dptr = *ptr; 742 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 743 src = atom_get_src(ctx, attr, ptr); 744 #ifdef ATOM_TRACE 745 TRACE("%s: 0x%" B_PRIX32 " | 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 746 __func__, dst, src, dst | src); 747 #endif 748 dst |= src; 749 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 750 } 751 752 753 static void 754 atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) 755 { 756 #ifdef ATOM_TRACE 757 uint8 val = U8((*ptr)++); 758 TRACE("%s: POST card output: 0x%" B_PRIX8 "\n", __func__, val); 759 #endif 760 } 761 762 763 static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) 764 { 765 TRACE("%s: unimplemented!\n", __func__); 766 } 767 768 769 static void 770 atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) 771 { 772 TRACE("%s: unimplemented!\n", __func__); 773 } 774 775 776 static void 777 atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) 778 { 779 TRACE("%s: unimplemented!\n", __func__); 780 } 781 782 783 static void 784 atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) 785 { 786 int idx = U8(*ptr); 787 (*ptr)++; 788 TRACE("%s: block: %d\n", __func__, idx); 789 if (!idx) 790 ctx->ctx->data_block = 0; 791 else if (idx == 255) 792 ctx->ctx->data_block = ctx->start; 793 else 794 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); 795 } 796 797 798 static void 799 atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) 800 { 801 uint8 attr = U8((*ptr)++); 802 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); 803 TRACE("%s: fb_base: 0x%" B_PRIX32 "\n", __func__, ctx->ctx->fb_base); 804 } 805 806 807 static void 808 atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) 809 { 810 int port; 811 switch(arg) { 812 case ATOM_PORT_ATI: 813 port = U16(*ptr); 814 if (port < ATOM_IO_NAMES_CNT) { 815 TRACE("%s: port: %d (%s)\n", __func__, 816 port, atom_io_names[port]); 817 } else 818 TRACE("%s: port: %d\n", __func__, port); 819 if (!port) 820 ctx->ctx->io_mode = ATOM_IO_MM; 821 else 822 ctx->ctx->io_mode = ATOM_IO_IIO | port; 823 (*ptr) += 2; 824 break; 825 case ATOM_PORT_PCI: 826 ctx->ctx->io_mode = ATOM_IO_PCI; 827 (*ptr)++; 828 break; 829 case ATOM_PORT_SYSIO: 830 ctx->ctx->io_mode = ATOM_IO_SYSIO; 831 (*ptr)++; 832 break; 833 } 834 } 835 836 837 static void 838 atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) 839 { 840 ctx->ctx->reg_block = U16(*ptr); 841 (*ptr)+=2; 842 } 843 844 845 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) 846 { 847 uint8 attr = U8((*ptr)++), shift; 848 uint32 saved, dst; 849 int dptr = *ptr; 850 attr &= 0x38; 851 attr |= atom_def_dst[attr >> 3] << 6; 852 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 853 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 854 #ifdef ATOM_TRACE 855 TRACE("%s: 0x%" B_PRIX32 " << %" B_PRId8 " is 0X%" B_PRIX32 "\n", 856 __func__, dst, shift, dst << shift); 857 #endif 858 dst <<= shift; 859 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 860 } 861 862 863 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) 864 { 865 uint8 attr = U8((*ptr)++), shift; 866 uint32 saved, dst; 867 int dptr = *ptr; 868 attr &= 0x38; 869 attr |= atom_def_dst[attr >> 3] << 6; 870 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 871 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); 872 #ifdef ATOM_TRACE 873 TRACE("%s: 0x%" B_PRIX32 " >> %" B_PRId8 " is 0X%" B_PRIX32 "\n", 874 __func__, dst, shift, dst << shift); 875 #endif 876 dst >>= shift; 877 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 878 } 879 880 881 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) 882 { 883 uint8 attr = U8((*ptr)++), shift; 884 uint32 saved, dst; 885 int dptr = *ptr; 886 uint32 dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 887 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 888 /* op needs to full dst value */ 889 dst = saved; 890 shift = atom_get_src(ctx, attr, ptr); 891 #ifdef ATOM_TRACE 892 TRACE("%s: 0x%" B_PRIX32 " << %" B_PRId8 " is 0X%" B_PRIX32 "\n", 893 __func__, dst, shift, dst << shift); 894 #endif 895 dst <<= shift; 896 dst &= atom_arg_mask[dst_align]; 897 dst >>= atom_arg_shift[dst_align]; 898 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 899 } 900 901 902 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) 903 { 904 uint8 attr = U8((*ptr)++), shift; 905 uint32 saved, dst; 906 int dptr = *ptr; 907 uint32 dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3]; 908 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 909 /* op needs to full dst value */ 910 dst = saved; 911 shift = atom_get_src(ctx, attr, ptr); 912 #ifdef ATOM_TRACE 913 TRACE("%s: 0x%" B_PRIX32 " >> %" B_PRId8 " is 0X%" B_PRIX32 "\n", 914 __func__, dst, shift, dst << shift); 915 #endif 916 dst >>= shift; 917 dst &= atom_arg_mask[dst_align]; 918 dst >>= atom_arg_shift[dst_align]; 919 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 920 } 921 922 923 static void 924 atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) 925 { 926 uint8 attr = U8((*ptr)++); 927 uint32 dst, src, saved; 928 int dptr = *ptr; 929 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 930 src = atom_get_src(ctx, attr, ptr); 931 #ifdef TRACE_ATOM 932 TRACE("%s: 0x%" B_PRIX32 " - 0x%" B_PRIX32 " is 0x%" B_PRIX32 "\n", 933 __func__, dst, src, dst - src); 934 #endif 935 dst -= src; 936 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 937 } 938 939 940 static void 941 atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) 942 { 943 uint8 attr = U8((*ptr)++); 944 uint32 src, val, target; 945 TRACE("%s: switch start\n", __func__); 946 src = atom_get_src(ctx, attr, ptr); 947 while (U16(*ptr) != ATOM_CASE_END) 948 if (U8(*ptr) == ATOM_CASE_MAGIC) { 949 (*ptr)++; 950 TRACE("%s: switch case\n", __func__); 951 val = atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, ptr); 952 target = U16(*ptr); 953 if (val == src) { 954 *ptr = ctx->start + target; 955 return; 956 } 957 (*ptr) += 2; 958 } else { 959 TRACE("%s: ERROR bad case\n", __func__); 960 return; 961 } 962 (*ptr) += 2; 963 } 964 965 966 static void 967 atom_op_test(atom_exec_context *ctx, int *ptr, int arg) 968 { 969 uint8 attr = U8((*ptr)++); 970 uint32 dst, src; 971 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); 972 src = atom_get_src(ctx, attr, ptr); 973 ctx->ctx->cs_equal = ((dst & src) == 0); 974 TRACE("%s: 0x%" B_PRIX32 " and 0x%" B_PRIX32 " are %s\n", __func__, 975 dst, src, ctx->ctx->cs_equal ? "EQ" : "NE"); 976 } 977 978 979 static void 980 atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) 981 { 982 uint8 attr = U8((*ptr)++); 983 uint32 dst, src, saved; 984 int dptr = *ptr; 985 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 986 src = atom_get_src(ctx, attr, ptr); 987 #ifdef ATOM_TRACE 988 TRACE("%s: 0x%" B_PRIX32 " ^ 0X%" B_PRIX32 " is " B_PRIX32 "\n", 989 __func__, dst, src, dst ^ src); 990 #endif 991 dst ^= src; 992 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 993 } 994 995 996 static void 997 atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) 998 { 999 TRACE("%s: unimplemented!\n", __func__); 1000 } 1001 1002 1003 static struct { 1004 void (*func)(atom_exec_context *, int *, int); 1005 int arg; 1006 } opcode_table[ATOM_OP_CNT] = { 1007 { NULL, 0 }, 1008 { atom_op_move, ATOM_ARG_REG }, 1009 { atom_op_move, ATOM_ARG_PS }, 1010 { atom_op_move, ATOM_ARG_WS }, 1011 { atom_op_move, ATOM_ARG_FB }, 1012 { atom_op_move, ATOM_ARG_PLL }, 1013 { atom_op_move, ATOM_ARG_MC }, 1014 { atom_op_and, ATOM_ARG_REG }, 1015 { atom_op_and, ATOM_ARG_PS }, 1016 { atom_op_and, ATOM_ARG_WS }, 1017 { atom_op_and, ATOM_ARG_FB }, 1018 { atom_op_and, ATOM_ARG_PLL }, 1019 { atom_op_and, ATOM_ARG_MC }, 1020 { atom_op_or, ATOM_ARG_REG }, 1021 { atom_op_or, ATOM_ARG_PS }, 1022 { atom_op_or, ATOM_ARG_WS }, 1023 { atom_op_or, ATOM_ARG_FB }, 1024 { atom_op_or, ATOM_ARG_PLL }, 1025 { atom_op_or, ATOM_ARG_MC }, 1026 { atom_op_shift_left, ATOM_ARG_REG }, 1027 { atom_op_shift_left, ATOM_ARG_PS }, 1028 { atom_op_shift_left, ATOM_ARG_WS }, 1029 { atom_op_shift_left, ATOM_ARG_FB }, 1030 { atom_op_shift_left, ATOM_ARG_PLL }, 1031 { atom_op_shift_left, ATOM_ARG_MC }, 1032 { atom_op_shift_right, ATOM_ARG_REG }, 1033 { atom_op_shift_right, ATOM_ARG_PS }, 1034 { atom_op_shift_right, ATOM_ARG_WS }, 1035 { atom_op_shift_right, ATOM_ARG_FB }, 1036 { atom_op_shift_right, ATOM_ARG_PLL }, 1037 { atom_op_shift_right, ATOM_ARG_MC }, 1038 { atom_op_mul, ATOM_ARG_REG }, 1039 { atom_op_mul, ATOM_ARG_PS }, 1040 { atom_op_mul, ATOM_ARG_WS }, 1041 { atom_op_mul, ATOM_ARG_FB }, 1042 { atom_op_mul, ATOM_ARG_PLL }, 1043 { atom_op_mul, ATOM_ARG_MC }, 1044 { atom_op_div, ATOM_ARG_REG }, 1045 { atom_op_div, ATOM_ARG_PS }, 1046 { atom_op_div, ATOM_ARG_WS }, 1047 { atom_op_div, ATOM_ARG_FB }, 1048 { atom_op_div, ATOM_ARG_PLL }, 1049 { atom_op_div, ATOM_ARG_MC }, 1050 { atom_op_add, ATOM_ARG_REG }, 1051 { atom_op_add, ATOM_ARG_PS }, 1052 { atom_op_add, ATOM_ARG_WS }, 1053 { atom_op_add, ATOM_ARG_FB }, 1054 { atom_op_add, ATOM_ARG_PLL }, 1055 { atom_op_add, ATOM_ARG_MC }, 1056 { atom_op_sub, ATOM_ARG_REG }, 1057 { atom_op_sub, ATOM_ARG_PS }, 1058 { atom_op_sub, ATOM_ARG_WS }, 1059 { atom_op_sub, ATOM_ARG_FB }, 1060 { atom_op_sub, ATOM_ARG_PLL }, 1061 { atom_op_sub, ATOM_ARG_MC }, 1062 { atom_op_setport, ATOM_PORT_ATI }, 1063 { atom_op_setport, ATOM_PORT_PCI }, 1064 { atom_op_setport, ATOM_PORT_SYSIO }, 1065 { atom_op_setregblock, 0 }, 1066 { atom_op_setfbbase, 0 }, 1067 { atom_op_compare, ATOM_ARG_REG }, 1068 { atom_op_compare, ATOM_ARG_PS }, 1069 { atom_op_compare, ATOM_ARG_WS }, 1070 { atom_op_compare, ATOM_ARG_FB }, 1071 { atom_op_compare, ATOM_ARG_PLL }, 1072 { atom_op_compare, ATOM_ARG_MC }, 1073 { atom_op_switch, 0 }, 1074 { atom_op_jump, ATOM_COND_ALWAYS }, 1075 { atom_op_jump, ATOM_COND_EQUAL }, 1076 { atom_op_jump, ATOM_COND_BELOW }, 1077 { atom_op_jump, ATOM_COND_ABOVE }, 1078 { atom_op_jump, ATOM_COND_BELOWOREQUAL }, 1079 { atom_op_jump, ATOM_COND_ABOVEOREQUAL }, 1080 { atom_op_jump, ATOM_COND_NOTEQUAL }, 1081 { atom_op_test, ATOM_ARG_REG }, 1082 { atom_op_test, ATOM_ARG_PS }, 1083 { atom_op_test, ATOM_ARG_WS }, 1084 { atom_op_test, ATOM_ARG_FB }, 1085 { atom_op_test, ATOM_ARG_PLL }, 1086 { atom_op_test, ATOM_ARG_MC }, 1087 { atom_op_delay, ATOM_UNIT_MILLISEC }, 1088 { atom_op_delay, ATOM_UNIT_MICROSEC }, 1089 { atom_op_calltable, 0 }, 1090 { atom_op_repeat, 0 }, 1091 { atom_op_clear, ATOM_ARG_REG }, 1092 { atom_op_clear, ATOM_ARG_PS }, 1093 { atom_op_clear, ATOM_ARG_WS }, 1094 { atom_op_clear, ATOM_ARG_FB }, 1095 { atom_op_clear, ATOM_ARG_PLL }, 1096 { atom_op_clear, ATOM_ARG_MC }, 1097 { atom_op_nop, 0 }, 1098 { atom_op_eot, 0 }, 1099 { atom_op_mask, ATOM_ARG_REG }, 1100 { atom_op_mask, ATOM_ARG_PS }, 1101 { atom_op_mask, ATOM_ARG_WS }, 1102 { atom_op_mask, ATOM_ARG_FB }, 1103 { atom_op_mask, ATOM_ARG_PLL }, 1104 { atom_op_mask, ATOM_ARG_MC }, 1105 { atom_op_postcard, 0 }, 1106 { atom_op_beep, 0 }, 1107 { atom_op_savereg, 0 }, 1108 { atom_op_restorereg, 0 }, 1109 { atom_op_setdatablock, 0 }, 1110 { atom_op_xor, ATOM_ARG_REG }, 1111 { atom_op_xor, ATOM_ARG_PS }, 1112 { atom_op_xor, ATOM_ARG_WS }, 1113 { atom_op_xor, ATOM_ARG_FB }, 1114 { atom_op_xor, ATOM_ARG_PLL }, 1115 { atom_op_xor, ATOM_ARG_MC }, 1116 { atom_op_shl, ATOM_ARG_REG }, 1117 { atom_op_shl, ATOM_ARG_PS }, 1118 { atom_op_shl, ATOM_ARG_WS }, 1119 { atom_op_shl, ATOM_ARG_FB }, 1120 { atom_op_shl, ATOM_ARG_PLL }, 1121 { atom_op_shl, ATOM_ARG_MC }, 1122 { atom_op_shr, ATOM_ARG_REG }, 1123 { atom_op_shr, ATOM_ARG_PS }, 1124 { atom_op_shr, ATOM_ARG_WS }, 1125 { atom_op_shr, ATOM_ARG_FB }, 1126 { atom_op_shr, ATOM_ARG_PLL }, 1127 { atom_op_shr, ATOM_ARG_MC }, 1128 { atom_op_debug, 0 }, 1129 }; 1130 1131 1132 status_t 1133 atom_execute_table_locked(atom_context *ctx, int index, uint32 * params) 1134 { 1135 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1136 int len, ws, ps, ptr; 1137 unsigned char op; 1138 atom_exec_context ectx; 1139 1140 if (!base) 1141 return B_ERROR; 1142 1143 len = CU16(base + ATOM_CT_SIZE_PTR); 1144 ws = CU8(base + ATOM_CT_WS_PTR); 1145 ps = CU8(base + ATOM_CT_PS_PTR) & ATOM_CT_PS_MASK; 1146 ptr = base + ATOM_CT_CODE_PTR; 1147 1148 ectx.ctx = ctx; 1149 ectx.ps_shift = ps / 4; 1150 ectx.start = base; 1151 ectx.ps = params; 1152 ectx.abort = false; 1153 ectx.last_jump = 0; 1154 ectx.last_jump_count = 0; 1155 if (ws) 1156 ectx.ws = (uint32*)malloc(4 * ws); 1157 else 1158 ectx.ws = NULL; 1159 1160 debug_depth++; 1161 while (1) { 1162 op = CU8(ptr++); 1163 if (op < ATOM_OP_NAMES_CNT) { 1164 TRACE("%s: %s (0x%" B_PRIX16 ")\n", __func__, 1165 atom_op_names[op], ptr - 1); 1166 } else 1167 TRACE("%s: unknown (0x%" B_PRIX16 ")\n", __func__, ptr - 1); 1168 1169 if (ectx.abort == true) { 1170 ERROR("AtomBios parser was aborted executing (0x%" B_PRIX16 ")\n", 1171 ptr - 1); 1172 free(ectx.ws); 1173 return B_ERROR; 1174 } 1175 1176 if (op < ATOM_OP_CNT && op > 0) 1177 opcode_table[op].func(&ectx, &ptr, opcode_table[op].arg); 1178 else 1179 break; 1180 1181 if (op == ATOM_OP_EOT) 1182 break; 1183 } 1184 debug_depth--; 1185 1186 free(ectx.ws); 1187 return B_OK; 1188 } 1189 1190 1191 status_t 1192 atom_execute_table(atom_context *ctx, int index, uint32 *params) 1193 { 1194 if (acquire_sem_etc(ctx->exec_sem, 1, B_RELATIVE_TIMEOUT, 5000000) 1195 != B_NO_ERROR) { 1196 ERROR("%s: Timeout to obtain semaphore!\n", __func__); 1197 return B_ERROR; 1198 } 1199 /* reset reg block */ 1200 ctx->reg_block = 0; 1201 /* reset fb window */ 1202 ctx->fb_base = 0; 1203 /* reset io mode */ 1204 ctx->io_mode = ATOM_IO_MM; 1205 status_t result = atom_execute_table_locked(ctx, index, params); 1206 1207 release_sem(ctx->exec_sem); 1208 return result; 1209 } 1210 1211 1212 static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1213 1214 1215 static void 1216 atom_index_iio(atom_context *ctx, int base) 1217 { 1218 ctx->iio = (uint16*)malloc(2 * 256); 1219 while (CU8(base) == ATOM_IIO_START) { 1220 ctx->iio[CU8(base + 1)] = base + 2; 1221 base += 2; 1222 while (CU8(base) != ATOM_IIO_END) 1223 base += atom_iio_len[CU8(base)]; 1224 base += 3; 1225 } 1226 } 1227 1228 1229 atom_context* 1230 atom_parse(card_info *card, uint8 *bios) 1231 { 1232 atom_context *ctx = (atom_context*)malloc(sizeof(atom_context)); 1233 1234 if (ctx == NULL) { 1235 ERROR("%s: Error: No memory for atom_context mapping\n", __func__); 1236 return NULL; 1237 } 1238 1239 ctx->card = card; 1240 ctx->bios = bios; 1241 1242 if (CU16(0) != ATOM_BIOS_MAGIC) { 1243 ERROR("Invalid BIOS magic.\n"); 1244 free(ctx); 1245 return NULL; 1246 } 1247 if (strncmp(CSTR(ATOM_ATI_MAGIC_PTR), ATOM_ATI_MAGIC, 1248 strlen(ATOM_ATI_MAGIC))) { 1249 ERROR("Invalid ATI magic.\n"); 1250 free(ctx); 1251 return NULL; 1252 } 1253 1254 int base = CU16(ATOM_ROM_TABLE_PTR); 1255 if (strncmp(CSTR(base + ATOM_ROM_MAGIC_PTR), ATOM_ROM_MAGIC, 1256 strlen(ATOM_ROM_MAGIC))) { 1257 ERROR("Invalid ATOM magic.\n"); 1258 free(ctx); 1259 return NULL; 1260 } 1261 1262 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); 1263 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); 1264 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); 1265 1266 char *str = CSTR(CU16(base + ATOM_ROM_MSG_PTR)); 1267 while (*str && ((*str == '\n') || (*str == '\r'))) 1268 str++; 1269 1270 int i; 1271 char name[512]; 1272 // Terminate bios string if not 0 terminated 1273 for (i = 0; i < 511; i++) { 1274 name[i] = str[i]; 1275 if (name[i] < '.' || name[i] > 'z') { 1276 name[i] = 0; 1277 break; 1278 } 1279 } 1280 1281 TRACE("ATOM BIOS: %s", name); 1282 1283 return ctx; 1284 } 1285 1286 1287 status_t 1288 atom_asic_init(atom_context *ctx) 1289 { 1290 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); 1291 uint32 ps[16]; 1292 memset(ps, 0, 64); 1293 1294 ps[0] = B_HOST_TO_LENDIAN_INT32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR)); 1295 ps[1] = B_HOST_TO_LENDIAN_INT32(CU32(hwi + ATOM_FWI_DEFMCLK_PTR)); 1296 if (!ps[0] || !ps[1]) 1297 return B_ERROR; 1298 1299 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1300 return B_ERROR; 1301 1302 return atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1303 } 1304 1305 1306 void 1307 atom_destroy(atom_context *ctx) 1308 { 1309 if (ctx != NULL) { 1310 free(ctx->iio); 1311 free(ctx->scratch); 1312 delete_sem(ctx->exec_sem); 1313 } 1314 1315 free(ctx); 1316 } 1317 1318 1319 status_t 1320 atom_parse_data_header(atom_context *ctx, int index, uint16 *size, 1321 uint8 *frev, uint8 *crev, uint16 *data_start) 1322 { 1323 int offset = index * 2 + 4; 1324 int idx = CU16(ctx->data_table + offset); 1325 uint16 *mdt = (uint16 *)ctx->bios + ctx->data_table + 4; 1326 1327 if (!mdt[index]) 1328 return B_ERROR; 1329 1330 if (size) 1331 *size = CU16(idx); 1332 if (frev) 1333 *frev = CU8(idx + 2); 1334 if (crev) 1335 *crev = CU8(idx + 3); 1336 *data_start = idx; 1337 return B_OK; 1338 } 1339 1340 1341 status_t 1342 atom_parse_cmd_header(atom_context *ctx, int index, uint8 * frev, 1343 uint8 * crev) 1344 { 1345 int offset = index * 2 + 4; 1346 int idx = CU16(ctx->cmd_table + offset); 1347 uint16 *mct = (uint16 *)ctx->bios + ctx->cmd_table + 4; 1348 1349 if (!mct[index]) 1350 return B_ERROR; 1351 1352 if (frev) 1353 *frev = CU8(idx + 2); 1354 if (crev) 1355 *crev = CU8(idx + 3); 1356 return B_OK; 1357 } 1358 1359 1360 status_t 1361 atom_allocate_fb_scratch(atom_context *ctx) 1362 { 1363 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); 1364 uint16 data_offset; 1365 int usage_bytes = 0; 1366 _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware; 1367 1368 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset) 1369 == B_OK) { 1370 firmware = (_ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1371 1372 TRACE("Atom firmware requested 0x%" B_PRIX32 " %" B_PRIu16 "kb\n", 1373 firmware->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, 1374 firmware->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); 1375 1376 usage_bytes 1377 = firmware->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; 1378 } 1379 ctx->scratch_size_bytes = 0; 1380 if (usage_bytes == 0) 1381 usage_bytes = 20 * 1024; 1382 /* allocate some scratch memory */ 1383 ctx->scratch = (uint32*)malloc(usage_bytes); 1384 if (!ctx->scratch) 1385 return B_NO_MEMORY; 1386 1387 ctx->scratch_size_bytes = usage_bytes; 1388 return B_OK; 1389 } 1390