/*
 * Copyright 2006-2011, Haiku, Inc. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 *
 * Authors:
 *		Alexander von Gluck, kallisti5@unixzen.com
 *		Axel Dörfler, axeld@pinc-software.de
 */


#include "gpu.h"

#include <Debug.h>

#include "accelerant_protos.h"
#include "accelerant.h"
#include "atom.h"
#include "bios.h"
#include "pll.h"
#include "utility.h"


#undef TRACE

#define TRACE_GPU
#ifdef TRACE_GPU
#	define TRACE(x...) _sPrintf("radeon_hd: " x)
#else
#	define TRACE(x...) ;
#endif

#define ERROR(x...) _sPrintf("radeon_hd: " x)


status_t
radeon_gpu_probe()
{
	uint8 tableMajor;
	uint8 tableMinor;
	uint16 tableOffset;

	gInfo->displayClockFrequency = 0;
	gInfo->dpExternalClock = 0;

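	// Locate the FirmwareInfo data table in the AtomBIOS master data table;
	// it advertises the default clocks and limits read below.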
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	if (atom_parse_data_header(gAtomContext, index, NULL,
			&tableMajor, &tableMinor, &tableOffset) != B_OK) {
		ERROR("%s: Couldn't parse data header\n", __func__);
		return B_ERROR;
	}

	TRACE("%s: table %" B_PRIu8 ".%" B_PRIu8 "\n", __func__,
		tableMajor, tableMinor);

	union atomFirmwareInfo {
		ATOM_FIRMWARE_INFO info;
		ATOM_FIRMWARE_INFO_V1_2 info_12;
		ATOM_FIRMWARE_INFO_V1_3 info_13;
		ATOM_FIRMWARE_INFO_V1_4 info_14;
		ATOM_FIRMWARE_INFO_V2_1 info_21;
		ATOM_FIRMWARE_INFO_V2_2 info_22;
	};
	union atomFirmwareInfo* firmwareInfo
		= (union atomFirmwareInfo*)(gAtomContext->bios + tableOffset);

	radeon_shared_info &info = *gInfo->shared_info;

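	// DCE 4 and newer report a default display engine clock and an external
	// DisplayPort reference clock in the v2.1 table layout. The table clocks
	// appear to be in 10 kHz units, hence the * 10 scaling to kHz below.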
	if (info.dceMajor >= 4) {
		gInfo->displayClockFrequency = B_LENDIAN_TO_HOST_INT32(
			firmwareInfo->info_21.ulDefaultDispEngineClkFreq);
		gInfo->displayClockFrequency *= 10;
		if (gInfo->displayClockFrequency == 0) {
			if (info.dceMajor == 5)
				gInfo->displayClockFrequency = 540000;
			else
				gInfo->displayClockFrequency = 600000;
		}
		gInfo->dpExternalClock = B_LENDIAN_TO_HOST_INT16(
			firmwareInfo->info_21.usUniphyDPModeExtClkFreq);
		gInfo->dpExternalClock *= 10;
	}

	gInfo->maximumPixelClock = B_LENDIAN_TO_HOST_INT16(
		firmwareInfo->info.usMaxPixelClock);
	gInfo->maximumPixelClock *= 10;

	if (gInfo->maximumPixelClock == 0)
		gInfo->maximumPixelClock = 400000;

	return B_OK;
}


status_t
radeon_gpu_reset()
{
	radeon_shared_info &info = *gInfo->shared_info;

	// Read GRBM Command Processor status
	if ((Read32(OUT, GRBM_STATUS) & GUI_ACTIVE) == 0)
		return B_ERROR;

	TRACE("%s: GPU software reset in progress...\n", __func__);

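	// Reset sequence: halt the memory controller, soft-reset whatever
	// graphics blocks report busy through GRBM, then bring the memory
	// controller back up.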
	// Halt memory controller
	struct gpu_state gpuState;
	radeon_gpu_mc_halt(&gpuState);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Couldn't idle memory controller!\n", __func__);

	if (info.chipsetID < RADEON_CEDAR) {
		Write32(OUT, CP_ME_CNTL, CP_ME_HALT);
			// Disable Command Processor parsing / prefetching

		// Register busy masks for early Radeon HD cards

		// GRBM Command Processor Status
		uint32 grbmBusyMask = VC_BUSY;
			// Vertex Cache Busy
		grbmBusyMask |= VGT_BUSY_NO_DMA | VGT_BUSY;
			// Vertex Grouper Tessellator Busy
		grbmBusyMask |= TA03_BUSY;
			// unknown
		grbmBusyMask |= TC_BUSY;
			// Texture Cache Busy
		grbmBusyMask |= SX_BUSY;
			// Shader Export Busy
		grbmBusyMask |= SH_BUSY;
			// Sequencer Instruction Cache Busy
		grbmBusyMask |= SPI_BUSY;
			// Shader Processor Interpolator Busy
		grbmBusyMask |= SMX_BUSY;
			// Shader Memory Exchange
		grbmBusyMask |= SC_BUSY;
			// Scan Converter Busy
		grbmBusyMask |= PA_BUSY;
			// Primitive Assembler Busy
		grbmBusyMask |= DB_BUSY;
			// Depth Block Busy
		grbmBusyMask |= CR_BUSY;
			// unknown
		grbmBusyMask |= CB_BUSY;
			// Color Block Busy
		grbmBusyMask |= GUI_ACTIVE;
			// unknown (graphics pipeline active?)

		// GRBM Command Processor Detailed Status
		uint32 grbm2BusyMask = SPI0_BUSY | SPI1_BUSY | SPI2_BUSY | SPI3_BUSY;
			// Shader Processor Interpolator 0 - 3 Busy
		grbm2BusyMask |= TA0_BUSY | TA1_BUSY | TA2_BUSY | TA3_BUSY;
			// unknown 0 - 3 Busy
		grbm2BusyMask |= DB0_BUSY | DB1_BUSY | DB2_BUSY | DB3_BUSY;
			// Depth Block 0 - 3 Busy
		grbm2BusyMask |= CB0_BUSY | CB1_BUSY | CB2_BUSY | CB3_BUSY;
			// Color Block 0 - 3 Busy

		uint32 tmp;
		// Check if any of the rendering blocks is busy and, if so, reset it
		if ((Read32(OUT, GRBM_STATUS) & grbmBusyMask) != 0
			|| (Read32(OUT, GRBM_STATUS2) & grbm2BusyMask) != 0) {
			tmp = SOFT_RESET_CR
				| SOFT_RESET_DB
				| SOFT_RESET_CB
				| SOFT_RESET_PA
				| SOFT_RESET_SC
				| SOFT_RESET_SMX
				| SOFT_RESET_SPI
				| SOFT_RESET_SX
				| SOFT_RESET_SH
				| SOFT_RESET_TC
				| SOFT_RESET_TA
				| SOFT_RESET_VC
				| SOFT_RESET_VGT;
			Write32(OUT, GRBM_SOFT_RESET, tmp);
			Read32(OUT, GRBM_SOFT_RESET);
			snooze(15000);
			Write32(OUT, GRBM_SOFT_RESET, 0);
		}

		// Reset CP
		tmp = SOFT_RESET_CP;
		Write32(OUT, GRBM_SOFT_RESET, tmp);
		Read32(OUT, GRBM_SOFT_RESET);
		snooze(15000);
		Write32(OUT, GRBM_SOFT_RESET, 0);

		// Let things settle
		snooze(1000);
	} else {
		// Evergreen and higher

		Write32(OUT, CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
			// Disable Command Processor parsing / prefetching

		// reset the graphics pipeline components
		uint32 grbmReset = (SOFT_RESET_CP
			| SOFT_RESET_CB
			| SOFT_RESET_DB
			| SOFT_RESET_GDS
			| SOFT_RESET_PA
			| SOFT_RESET_SC
			| SOFT_RESET_SPI
			| SOFT_RESET_SH
			| SOFT_RESET_SX
			| SOFT_RESET_TC
			| SOFT_RESET_TA
			| SOFT_RESET_VGT
			| SOFT_RESET_IA);

		Write32(OUT, GRBM_SOFT_RESET, grbmReset);
		Read32(OUT, GRBM_SOFT_RESET);

		snooze(50);
		Write32(OUT, GRBM_SOFT_RESET, 0);
		Read32(OUT, GRBM_SOFT_RESET);
		snooze(50);
	}

	// Resume memory controller
	radeon_gpu_mc_resume(&gpuState);
	return B_OK;
}


status_t
radeon_gpu_quirks()
{
	radeon_shared_info &info = *gInfo->shared_info;

	// Fix PCIe power distribution issue for Polaris10 XT
	// aka "Card draws >75W from PCIe bus"
	if (info.chipsetID == RADEON_POLARIS10 && info.pciRev == 0xc7) {
		ERROR("%s: Applying Polaris10 power distribution fix.\n",
			__func__);
		radeon_gpu_i2c_cmd(0x10, 0x96, 0x1e, 0xdd);
		radeon_gpu_i2c_cmd(0x10, 0x96, 0x1f, 0xd0);
	}

	return B_OK;
}


status_t
radeon_gpu_i2c_cmd(uint16 slaveAddr, uint16 lineNumber, uint8 offset,
	uint8 data)
{
	TRACE("%s\n", __func__);

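	// Write a single byte to an I2C slave by executing the AtomBIOS
	// ProcessI2cChannelTransaction command table.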
	PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
	memset(&args, 0, sizeof(args));

	int index = GetIndexIntoMasterTable(COMMAND,
		ProcessI2cChannelTransaction);

	args.ucRegIndex = offset;
	args.lpI2CDataOut = data;
	args.ucFlag = HW_I2C_WRITE;
	args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
	args.ucTransBytes = 1;
	args.ucSlaveAddr = slaveAddr;
	args.ucLineNumber = lineNumber;

	atom_execute_table(gAtomContext, index, (uint32*)&args);
	return B_OK;
}


void
radeon_gpu_mc_halt(gpu_state* gpuState)
{
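	// Shut down scanout (VGA rendering and both CRTCs) so the memory
	// controller can be reprogrammed safely; the previous register values
	// are saved for radeon_gpu_mc_resume().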
	// Backup current memory controller state
	gpuState->d1vgaControl = Read32(OUT, AVIVO_D1VGA_CONTROL);
	gpuState->d2vgaControl = Read32(OUT, AVIVO_D2VGA_CONTROL);
	gpuState->vgaRenderControl = Read32(OUT, AVIVO_VGA_RENDER_CONTROL);
	gpuState->vgaHdpControl = Read32(OUT, AVIVO_VGA_HDP_CONTROL);
	gpuState->d1crtcControl = Read32(OUT, AVIVO_D1CRTC_CONTROL);
	gpuState->d2crtcControl = Read32(OUT, AVIVO_D2CRTC_CONTROL);

	// halt all memory controller actions
	Write32(OUT, AVIVO_VGA_RENDER_CONTROL, 0);
	Write32(OUT, AVIVO_D1CRTC_UPDATE_LOCK, 1);
	Write32(OUT, AVIVO_D2CRTC_UPDATE_LOCK, 1);
	Write32(OUT, AVIVO_D1CRTC_CONTROL, 0);
	Write32(OUT, AVIVO_D2CRTC_CONTROL, 0);
	Write32(OUT, AVIVO_D1CRTC_UPDATE_LOCK, 0);
	Write32(OUT, AVIVO_D2CRTC_UPDATE_LOCK, 0);
	Write32(OUT, AVIVO_D1VGA_CONTROL, 0);
	Write32(OUT, AVIVO_D2VGA_CONTROL, 0);
}


void
radeon_gpu_mc_resume(gpu_state* gpuState)
{
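	// Point every scanout surface at the (possibly relocated) start of VRAM
	// before restoring the saved CRTC / VGA state.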
	Write32(OUT, AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS, gInfo->fb.vramStart);
	Write32(OUT, AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS, gInfo->fb.vramStart);
	Write32(OUT, AVIVO_D2GRPH_PRIMARY_SURFACE_ADDRESS, gInfo->fb.vramStart);
	Write32(OUT, AVIVO_D2GRPH_SECONDARY_SURFACE_ADDRESS, gInfo->fb.vramStart);
	// TODO: Evergreen high surface addresses?
	Write32(OUT, AVIVO_VGA_MEMORY_BASE_ADDRESS, gInfo->fb.vramStart);

	// Unlock host access
	Write32(OUT, AVIVO_VGA_HDP_CONTROL, gpuState->vgaHdpControl);
	snooze(1);

	// Restore memory controller state
	Write32(OUT, AVIVO_D1VGA_CONTROL, gpuState->d1vgaControl);
	Write32(OUT, AVIVO_D2VGA_CONTROL, gpuState->d2vgaControl);
	Write32(OUT, AVIVO_D1CRTC_UPDATE_LOCK, 1);
	Write32(OUT, AVIVO_D2CRTC_UPDATE_LOCK, 1);
	Write32(OUT, AVIVO_D1CRTC_CONTROL, gpuState->d1crtcControl);
	Write32(OUT, AVIVO_D2CRTC_CONTROL, gpuState->d2crtcControl);
	Write32(OUT, AVIVO_D1CRTC_UPDATE_LOCK, 0);
	Write32(OUT, AVIVO_D2CRTC_UPDATE_LOCK, 0);
	Write32(OUT, AVIVO_VGA_RENDER_CONTROL, gpuState->vgaRenderControl);
}


status_t
radeon_gpu_mc_idlewait()
{
	uint32 idleStatus;

	uint32 busyBits
		= (VMC_BUSY | MCB_BUSY | MCDZ_BUSY | MCDY_BUSY | MCDX_BUSY | MCDW_BUSY);

	uint32 tryCount;
	// Give the GPU 0.5 seconds to become idle, checking once every 500 usec
	for (tryCount = 0; tryCount < 1000; tryCount++) {
		if (((idleStatus = Read32(MC, SRBM_STATUS)) & busyBits) == 0)
			return B_OK;
		snooze(500);
	}

	ERROR("%s: Couldn't idle SRBM!\n", __func__);

	bool state;
	state = (idleStatus & VMC_BUSY) != 0;
	TRACE("%s: VMC is %s\n", __func__, state ? "busy" : "idle");
	state = (idleStatus & MCB_BUSY) != 0;
	TRACE("%s: MCB is %s\n", __func__, state ? "busy" : "idle");
	state = (idleStatus & MCDZ_BUSY) != 0;
	TRACE("%s: MCDZ is %s\n", __func__, state ? "busy" : "idle");
	state = (idleStatus & MCDY_BUSY) != 0;
	TRACE("%s: MCDY is %s\n", __func__, state ? "busy" : "idle");
	state = (idleStatus & MCDX_BUSY) != 0;
	TRACE("%s: MCDX is %s\n", __func__, state ? "busy" : "idle");
	state = (idleStatus & MCDW_BUSY) != 0;
	TRACE("%s: MCDW is %s\n", __func__, state ? "busy" : "idle");

	return B_TIMED_OUT;
}


static status_t
radeon_gpu_mc_setup_r600()
{
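	// Program the r6xx memory controller so its system aperture and frame
	// buffer location match the VRAM range computed in radeon_gpu_mc_init().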
	// HDP initialization
	uint32 i;
	uint32 j;
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		Write32(OUT, (0x2c14 + j), 0x00000000);
		Write32(OUT, (0x2c18 + j), 0x00000000);
		Write32(OUT, (0x2c1c + j), 0x00000000);
		Write32(OUT, (0x2c20 + j), 0x00000000);
		Write32(OUT, (0x2c24 + j), 0x00000000);
	}
	Write32(OUT, R600_HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	// idle the memory controller
	struct gpu_state gpuState;
	radeon_gpu_mc_halt(&gpuState);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	// TODO: Memory Controller AGP
	Write32(OUT, R600_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		gInfo->fb.vramStart >> 12);
	Write32(OUT, R600_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		gInfo->fb.vramEnd >> 12);

	Write32(OUT, R600_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
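	// MC_VM_FB_LOCATION packs the VRAM range as two 16 bit fields in 16 MB
	// (1 << 24) granularity: the top address in the upper half, the base
	// address in the lower half.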
	uint32 tmp = ((gInfo->fb.vramEnd >> 24) & 0xFFFF) << 16;
	tmp |= ((gInfo->fb.vramStart >> 24) & 0xFFFF);

	Write32(OUT, R600_MC_VM_FB_LOCATION, tmp);
	Write32(OUT, R600_HDP_NONSURFACE_BASE, (gInfo->fb.vramStart >> 8));
	Write32(OUT, R600_HDP_NONSURFACE_INFO, (2 << 7));
	Write32(OUT, R600_HDP_NONSURFACE_SIZE, 0x3FFFFFFF);

	// is AGP?
	//	Write32(OUT, R600_MC_VM_AGP_TOP, gInfo->fb.gartEnd >> 22);
	//	Write32(OUT, R600_MC_VM_AGP_BOT, gInfo->fb.gartStart >> 22);
	//	Write32(OUT, R600_MC_VM_AGP_BASE, gInfo->fb.agpBase >> 22);
	// else?
	Write32(OUT, R600_MC_VM_AGP_BASE, 0);
	Write32(OUT, R600_MC_VM_AGP_TOP, 0x0FFFFFFF);
	Write32(OUT, R600_MC_VM_AGP_BOT, 0x0FFFFFFF);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	radeon_gpu_mc_resume(&gpuState);

	// disable render control
	Write32(OUT, 0x000300, Read32(OUT, 0x000300) & 0xFFFCFFFF);

	return B_OK;
}


static status_t
radeon_gpu_mc_setup_r700()
{
	// HDP initialization
	uint32 i;
	uint32 j;
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		Write32(OUT, (0x2c14 + j), 0x00000000);
		Write32(OUT, (0x2c18 + j), 0x00000000);
		Write32(OUT, (0x2c1c + j), 0x00000000);
		Write32(OUT, (0x2c20 + j), 0x00000000);
		Write32(OUT, (0x2c24 + j), 0x00000000);
	}

	// On r7xx, a read of HDP_DEBUG1 is used instead of the
	// HDP_REG_COHERENCY_FLUSH_CNTL write done on r6xx (hardware quirk)
	Read32(OUT, R700_HDP_DEBUG1);

	// idle the memory controller
	struct gpu_state gpuState;
	radeon_gpu_mc_halt(&gpuState);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	Write32(OUT, AVIVO_VGA_HDP_CONTROL, AVIVO_VGA_MEMORY_DISABLE);

	// TODO: Memory Controller AGP
	Write32(OUT, R700_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		gInfo->fb.vramStart >> 12);
	Write32(OUT, R700_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		gInfo->fb.vramEnd >> 12);

	Write32(OUT, R700_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	uint32 tmp = ((gInfo->fb.vramEnd >> 24) & 0xFFFF) << 16;
	tmp |= ((gInfo->fb.vramStart >> 24) & 0xFFFF);

	Write32(OUT, R700_MC_VM_FB_LOCATION, tmp);
	Write32(OUT, R700_HDP_NONSURFACE_BASE, (gInfo->fb.vramStart >> 8));
	Write32(OUT, R700_HDP_NONSURFACE_INFO, (2 << 7));
	Write32(OUT, R700_HDP_NONSURFACE_SIZE, 0x3FFFFFFF);

	// is AGP?
	//	Write32(OUT, R700_MC_VM_AGP_TOP, gInfo->fb.gartEnd >> 22);
	//	Write32(OUT, R700_MC_VM_AGP_BOT, gInfo->fb.gartStart >> 22);
	//	Write32(OUT, R700_MC_VM_AGP_BASE, gInfo->fb.agpBase >> 22);
	// else?
	Write32(OUT, R700_MC_VM_AGP_BASE, 0);
	Write32(OUT, R700_MC_VM_AGP_TOP, 0x0FFFFFFF);
	Write32(OUT, R700_MC_VM_AGP_BOT, 0x0FFFFFFF);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	radeon_gpu_mc_resume(&gpuState);

	// disable render control
	Write32(OUT, 0x000300, Read32(OUT, 0x000300) & 0xFFFCFFFF);

	return B_OK;
}


static status_t
radeon_gpu_mc_setup_evergreen()
{
	// HDP initialization
	uint32 i;
	uint32 j;
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		Write32(OUT, (0x2c14 + j), 0x00000000);
		Write32(OUT, (0x2c18 + j), 0x00000000);
		Write32(OUT, (0x2c1c + j), 0x00000000);
		Write32(OUT, (0x2c20 + j), 0x00000000);
		Write32(OUT, (0x2c24 + j), 0x00000000);
	}
	Write32(OUT, EVERGREEN_HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	// idle the memory controller
	struct gpu_state gpuState;
	radeon_gpu_mc_halt(&gpuState);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	Write32(OUT, AVIVO_VGA_HDP_CONTROL, AVIVO_VGA_MEMORY_DISABLE);

	// TODO: Memory Controller AGP
	Write32(OUT, EVERGREEN_MC_VM_SYSTEM_APERTURE_LOW_ADDR,
		gInfo->fb.vramStart >> 12);
	Write32(OUT, EVERGREEN_MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		gInfo->fb.vramEnd >> 12);

	Write32(OUT, EVERGREEN_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);

	radeon_shared_info &info = *gInfo->shared_info;
	if ((info.chipsetFlags & CHIP_IGP) != 0) {
		// Evergreen IGP Fusion
		uint32 tmp = Read32(OUT, EVERGREEN_MC_FUS_VM_FB_OFFSET)
			& 0x000FFFFF;
		tmp |= ((gInfo->fb.vramEnd >> 20) & 0xF) << 24;
		tmp |= ((gInfo->fb.vramStart >> 20) & 0xF) << 20;
		Write32(OUT, EVERGREEN_MC_FUS_VM_FB_OFFSET, tmp);
	}

	uint32 tmp = ((gInfo->fb.vramEnd >> 24) & 0xFFFF) << 16;
	tmp |= ((gInfo->fb.vramStart >> 24) & 0xFFFF);

	Write32(OUT, EVERGREEN_MC_VM_FB_LOCATION, tmp);
	Write32(OUT, EVERGREEN_HDP_NONSURFACE_BASE, (gInfo->fb.vramStart >> 8));
	Write32(OUT, EVERGREEN_HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	Write32(OUT, EVERGREEN_HDP_NONSURFACE_SIZE, 0x3FFFFFFF);

	// is AGP?
	//	Write32(OUT, EVERGREEN_MC_VM_AGP_TOP, gInfo->fb.gartEnd >> 16);
	//	Write32(OUT, EVERGREEN_MC_VM_AGP_BOT, gInfo->fb.gartStart >> 16);
	//	Write32(OUT, EVERGREEN_MC_VM_AGP_BASE, gInfo->fb.agpBase >> 22);
	// else?
	Write32(OUT, EVERGREEN_MC_VM_AGP_BASE, 0);
	Write32(OUT, EVERGREEN_MC_VM_AGP_TOP, 0x0FFFFFFF);
	Write32(OUT, EVERGREEN_MC_VM_AGP_BOT, 0x0FFFFFFF);

	if (radeon_gpu_mc_idlewait() != B_OK)
		ERROR("%s: Modifying non-idle memory controller!\n", __func__);

	radeon_gpu_mc_resume(&gpuState);

	// disable render control
	Write32(OUT, 0x000300, Read32(OUT, 0x000300) & 0xFFFCFFFF);

	return B_OK;
}


void
radeon_gpu_mc_init()
{
	radeon_shared_info &info = *gInfo->shared_info;

	uint32 fbVMLocationReg;
	if (info.chipsetID >= RADEON_CEDAR) {
		fbVMLocationReg = EVERGREEN_MC_VM_FB_LOCATION;
	} else if (info.chipsetID >= RADEON_RV770) {
		fbVMLocationReg = R700_MC_VM_FB_LOCATION;
	} else {
		fbVMLocationReg = R600_MC_VM_FB_LOCATION;
	}

	if (gInfo->shared_info->frame_buffer_size > 0)
		gInfo->fb.valid = true;

	// This is the card internal location.
	uint64 vramBase = 0;

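	// IGPs presumably carve their "VRAM" out of system memory, so keep the
	// base address the BIOS already programmed into the FB location register;
	// discrete cards keep their VRAM at card-internal address 0.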
	if ((info.chipsetFlags & CHIP_IGP) != 0) {
		vramBase = Read32(OUT, fbVMLocationReg) & 0xFFFF;
		vramBase <<= 24;
	}

	gInfo->fb.vramStart = vramBase;
	gInfo->fb.vramSize = (uint64)gInfo->shared_info->frame_buffer_size * 1024;
	gInfo->fb.vramEnd = (vramBase + gInfo->fb.vramSize) - 1;
}


status_t
radeon_gpu_mc_setup()
{
	radeon_shared_info &info = *gInfo->shared_info;

	radeon_gpu_mc_init();
		// init video ram ranges for memory controller

	if (gInfo->fb.valid != true) {
		ERROR("%s: Memory Controller init failed.\n", __func__);
		return B_ERROR;
	}

	TRACE("%s: vramStart: 0x%" B_PRIX64 ", vramEnd: 0x%" B_PRIX64 "\n",
		__func__, gInfo->fb.vramStart, gInfo->fb.vramEnd);

	if (info.chipsetID >= RADEON_CAYMAN)
		return radeon_gpu_mc_setup_evergreen();	// also for ni
	else if (info.chipsetID >= RADEON_CEDAR)
		return radeon_gpu_mc_setup_evergreen();
	else if (info.chipsetID >= RADEON_RV770)
		return radeon_gpu_mc_setup_r700();
	else if (info.chipsetID >= RADEON_R600)
		return radeon_gpu_mc_setup_r600();

	return B_ERROR;
}


status_t
radeon_gpu_ring_setup()
{
	TRACE("%s called\n", __func__);

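	// Only the graphics CP ring is created for now; the queue array is
	// indexed by ring type so further rings can be added later.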
	// init GFX ring queue
	gInfo->ringQueue[RADEON_QUEUE_TYPE_GFX_INDEX]
		= new RingQueue(1024 * 1024, RADEON_QUEUE_TYPE_GFX_INDEX);

#if 0
	// init IRQ ring queue (reverse of rendering/cp ring queue)
	gInfo->irqRingQueue
		= new IRQRingQueue(64 * 1024);
#endif


	return B_OK;
}


status_t
radeon_gpu_ring_boot(uint32 ringType)
{
	TRACE("%s called\n", __func__);

	RingQueue* ring = gInfo->ringQueue[ringType];
	if (ring == NULL) {
		ERROR("%s: Specified ring doesn't exist!\n", __func__);
		return B_ERROR;
	}

	// We don't execute this code until it's more complete.
	ERROR("%s: TODO\n", __func__);
	return B_OK;


	// TODO: Write initial ring PACKET3 STATE

	// *** r600_cp_init_ring_buffer
	// Reset command processor
	Write32(OUT, GRBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
	Read32(OUT, GRBM_SOFT_RESET);
	snooze(15000);
	Write32(OUT, GRBM_SOFT_RESET, 0);

	// Set ring buffer size
	uint32 controlScratch = RB_NO_UPDATE
		| (compute_order(4096 / 8) << 8)	// rptr_update_l2qw
		| compute_order(ring->GetSize() / 8);	// size_l2qw
#ifdef __BIG_ENDIAN
	controlScratch |= BUF_SWAP_32BIT;
#endif
	Write32(OUT, CP_RB_CNTL, controlScratch);

	// Set delays and timeouts
	Write32(OUT, CP_SEM_WAIT_TIMER, 0);
	Write32(OUT, CP_RB_WPTR_DELAY, 0);

	// Enable ring buffer read pointer writes (RB_RPTR_WR)
	controlScratch |= RB_RPTR_WR_ENA;
	Write32(OUT, CP_RB_CNTL, controlScratch);

	// Zero out command processor read and write pointers
	Write32(OUT, CP_RB_RPTR_WR, 0);
	Write32(OUT, CP_RB_WPTR, 0);

#if 0
	int ringPointer = 0;
	// TODO: AGP cards
	/*
	if (RADEON_IS_AGP) {
		ringPointer = dev_priv->ring_rptr->offset
			- dev->agp->base
			+ dev_priv->gart_vm_start;
	} else {
	*/
	ringPointer = dev_priv->ring_rptr->offset
		- ((unsigned long) dev->sg->virtual)
		+ dev_priv->gart_vm_start;

	Write32(OUT, CP_RB_RPTR_ADDR, (ringPointer & 0xfffffffc));
	Write32(OUT, CP_RB_RPTR_ADDR_HI, upper_32_bits(ringPointer));

	// Drop RPTR_WR_ENA and update CP RB Control
	controlScratch &= ~R600_RB_RPTR_WR_ENA;
	Write32(OUT, CP_RB_CNTL, controlScratch);
#endif

#if 0
	// Update command processor pointer
	int commandPointer = 0;

	// TODO: AGP cards
	/*
	if (RADEON_IS_AGP) {
		commandPointer = (dev_priv->cp_ring->offset
			- dev->agp->base
			+ dev_priv->gart_vm_start);
	}
	*/
	commandPointer = (dev_priv->cp_ring->offset
		- (unsigned long)dev->sg->virtual
		+ dev_priv->gart_vm_start);
#endif

#if 0
	Write32(OUT, CP_RB_BASE, commandPointer >> 8);
	Write32(OUT, CP_ME_CNTL, 0xff);
	Write32(OUT, CP_DEBUG, (1 << 27) | (1 << 28));
#endif

#if 0
	// Initialize scratch register pointer.
	// This will cause the scratch register values to be written
	// to memory whenever they are updated.

	uint64 scratchAddr = Read32(OUT, CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
	scratchAddr |= ((uint64)Read32(OUT, CP_RB_RPTR_ADDR_HI)) << 32;
	scratchAddr += R600_SCRATCH_REG_OFFSET;
	scratchAddr >>= 8;
	scratchAddr &= 0xffffffff;

	Write32(OUT, R600_SCRATCH_ADDR, (uint32)scratchAddr);

	Write32(OUT, R600_SCRATCH_UMSK, 0x7);
#endif

#if 0
	// Enable bus mastering
	radeon_enable_bm(dev_priv);

	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(0), 0);
	Write32(OUT, R600_LAST_FRAME_REG, 0);

	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(1), 0);
	Write32(OUT, R600_LAST_DISPATCH_REG, 0);

	radeon_write_ring_rptr(dev_priv, R600_SCRATCHOFF(2), 0);
	Write32(OUT, R600_LAST_CLEAR_REG, 0);
#endif

	// Reset sarea?
#if 0
	master_priv = file_priv->master->driver_priv;
	if (master_priv->sarea_priv) {
		master_priv->sarea_priv->last_frame = 0;
		master_priv->sarea_priv->last_dispatch = 0;
		master_priv->sarea_priv->last_clear = 0;
	}

	r600_do_wait_for_idle(dev_priv);
#endif

	return B_OK;
}


status_t
radeon_gpu_ss_control(pll_info* pll, bool enable)
{
	TRACE("%s called\n", __func__);

	radeon_shared_info &info = *gInfo->shared_info;
	uint32 ssControl;

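	// Spread spectrum slightly modulates the PLL frequency to reduce EMI.
	// Evergreen (DCE4) and older AVIVO parts expose it through different
	// SS control registers; only PPLL1 and PPLL2 are handled here.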
	if (info.chipsetID >= RADEON_CEDAR) {
		switch (pll->id) {
			case ATOM_PPLL1:
				// PLL1
				ssControl = Read32(OUT, EVERGREEN_P1PLL_SS_CNTL);
				if (enable)
					ssControl |= EVERGREEN_PxPLL_SS_EN;
				else
					ssControl &= ~EVERGREEN_PxPLL_SS_EN;
				Write32(OUT, EVERGREEN_P1PLL_SS_CNTL, ssControl);
				break;
			case ATOM_PPLL2:
				// PLL2
				ssControl = Read32(OUT, EVERGREEN_P2PLL_SS_CNTL);
				if (enable)
					ssControl |= EVERGREEN_PxPLL_SS_EN;
				else
					ssControl &= ~EVERGREEN_PxPLL_SS_EN;
				Write32(OUT, EVERGREEN_P2PLL_SS_CNTL, ssControl);
				break;
		}
		// DCPLL, no action
		return B_OK;
	} else if (info.chipsetID >= RADEON_RS600) {
		switch (pll->id) {
			case ATOM_PPLL1:
				// PLL1
				ssControl = Read32(OUT, AVIVO_P1PLL_INT_SS_CNTL);
				if (enable)
					ssControl |= 1;
				else
					ssControl &= ~1;
				Write32(OUT, AVIVO_P1PLL_INT_SS_CNTL, ssControl);
				break;
			case ATOM_PPLL2:
				// PLL2
				ssControl = Read32(OUT, AVIVO_P2PLL_INT_SS_CNTL);
				if (enable)
					ssControl |= 1;
				else
					ssControl &= ~1;
				Write32(OUT, AVIVO_P2PLL_INT_SS_CNTL, ssControl);
				break;
		}
		// DCPLL, no action
		return B_OK;
	}

	return B_ERROR;
}