xref: /haiku/src/add-ons/kernel/drivers/graphics/radeon/CP_setup.c (revision 1214ef1b2100f2b3299fc9d8d6142e46f70a4c3f)
1 /*
2 	Copyright (c) 2002, Thomas Kurschel
3 
4 
5 	Part of Radeon accelerant
6 
7 	CP initialization/sync/cleanup.
8 
9 	It also handles command buffer synchronization.
10 
11 	non-local memory is used as following:
12 	- 2048 dwords for ring buffer
13 	- 253 indirect buffers a 4k (1024 dwords)
14 	- 8 dwords for returned data (i.e. current read ptr)
15 	  & 6 dwords for "scratch registers"
16 
17 	usage of scratch registers:
18 	- reg 0 = reached engine.count
19 
20 	with a granularity of 4 KByte, we need 2+253+1=256 blocks, which is exactly 1 MB
21 */
22 
23 #include "radeon_driver.h"
24 #include "CPMicroCode.h"
25 #include "mmio.h"
26 #include "cp_regs.h"
27 #include "pll_regs.h"
28 #include "rbbm_regs.h"
29 #include "buscntrl_regs.h"
30 #include "config_regs.h"
31 #include "memcntrl_regs.h"
32 #include "utils.h"
33 #include "pll_access.h"
34 
35 #include "log_coll.h"
36 #include "log_enum.h"
37 
38 #include <string.h>
39 
#if 0

// macros for user-space
// (currently compiled out; kept for reference)

// allocate memory of the given type;
// res (status_t) must be declared by the caller and receives the result;
// handle/offset are pointers receiving the allocation's handle and offset
// NOTE(review): this branch multiplies asize by 4, but the callers in this
// file already pass byte counts (the kernel-space ALLOC_MEM passes asize
// through unchanged) - verify before re-enabling this code path
#define ALLOC_MEM( asize, mem_type, aglobal, handle, offset ) \
	do { \
		radeon_alloc_mem am; \
\
		am.magic = RADEON_PRIVATE_DATA_MAGIC; \
		am.size = (asize) * 4; \
		am.memory_type = (mt_nonlocal); \
		am.global = (aglobal); \
\
		res = ioctl( ai->fd, RADEON_ALLOC_MEM, &am ); \
		if( res == B_OK ) { \
			/* was unbraced: *(offset) got written even on failure */ \
			*(handle) = am.handle; \
			*(offset) = am.offset; \
		} \
	} while( 0 )

// get address as seen by program to access allocated memory
#define MEM2CPU( mem ) \
	((uint32 *)(ai->mapped_memory[(mem).memory_type].data + (mem).offset))

// get graphics card's virtual address of allocated memory
#define MEM2GC( mem ) ((mem).offset + si->memory[(mem).memory_type].virtual_addr_start)

// free memory
#define FREE_MEM( mem_type, handle ) \
	do { \
		radeon_free_mem fm; \
\
		fm.magic = RADEON_PRIVATE_DATA_MAGIC; \
		fm.memory_type = mem_type; \
		fm.handle = handle;	/* was "offset", which isn't a macro parameter */ \
\
		ioctl( ai->fd, RADEON_FREE_MEM, &fm ); \
	} while( 0 )

#else

// macros for kernel-space

// allocate memory
// if memory_type is non-local, it is replaced with default non-local type;
// res (status_t) must be declared by the caller and receives the result
#define ALLOC_MEM( asize, mem_type, aglobal, handle, offset ) \
	do { \
		if( mem_type == mt_nonlocal ) \
			mem_type = di->si->nonlocal_type; \
		res = mem_alloc( di->memmgr[mem_type], asize, NULL, handle, offset ); \
	} while( 0 )

// get address as seen by program to access allocated memory
// (memory_type must _not_ be non-local, see ALLOC_MEM)
#define MEM2CPU( memory_type, offset ) \
	((uint8 *)(memory_type == mt_local ? di->si->local_mem : \
	(memory_type == mt_PCI ? di->pci_gart.buffer.ptr : di->agp_gart.buffer.ptr)) \
	+ (offset))

// get graphics card's virtual address of allocated memory
// (memory_type must _not_ be non-local, see ALLOC_MEM)
#define MEM2GC( memory_type, offset ) \
	(di->si->memory[(memory_type)].virtual_addr_start + (offset))

// free memory
// if memory_type is non-local, it is replaced with default non-local type
#define FREE_MEM( mem_type, handle ) \
	mem_free( \
		di->memmgr[ mem_type == mt_nonlocal ? di->si->nonlocal_type : mem_type], \
		handle, NULL )

#endif
106 
107 
108 void Radeon_DiscardAllIndirectBuffers( device_info *di );
109 
110 #define RADEON_SCRATCH_REG_OFFSET	32
111 
112 
113 void Radeon_FlushPixelCache( device_info *di );
114 
115 // wait until engine is idle;
116 // acquire_lock - 	true, if lock must be hold
117 //					false, if lock is already acquired
118 // keep_lock -		true, keep lock on exit (only valid if acquire_lock is true)
void Radeon_WaitForIdle( device_info *di, bool acquire_lock, bool keep_lock )
{
	if( acquire_lock )
		ACQUIRE_BEN( di->si->cp.lock );

	// drain the command FIFO first, then poll the global engine-active bit
	Radeon_WaitForFifo( di, 64 );

	while( 1 ) {
		bigtime_t start_time = system_time();

		do {
			// engine is idle once the RBBM "active" bit is clear
			if( (INREG( di->regs, RADEON_RBBM_STATUS ) & RADEON_RBBM_ACTIVE) == 0 ) {
				// flush the 2D destination cache so memory is consistent
				Radeon_FlushPixelCache( di );

				// keep_lock only matters if we acquired the lock ourselves
				if( acquire_lock && !keep_lock)
					RELEASE_BEN( di->si->cp.lock );

				return;
			}

			snooze( 1 );
		} while( system_time() - start_time < 1000000 );

		// engine stayed busy for >1 s: log its state, soft-reset it and
		// retry (loops until the engine finally reports idle)
		SHOW_ERROR( 3, "Engine didn't become idle (rbbm_status=%lx, cp_stat=%lx, tlb_address=%lx, tlb_data=%lx)",
			INREG( di->regs, RADEON_RBBM_STATUS ),
			INREG( di->regs, RADEON_CP_STAT ),
			INREG( di->regs, RADEON_AIC_TLB_ADDR ),
			INREG( di->regs, RADEON_AIC_TLB_DATA ));

		LOG( di->si->log, _Radeon_WaitForIdle );

		Radeon_ResetEngine( di );
	}
}
153 
154 
155 // wait until "entries" FIFO entries are empty
156 // lock must be hold
157 void Radeon_WaitForFifo( device_info *di, int entries )
158 {
159 	while( 1 ) {
160 		bigtime_t start_time = system_time();
161 
162 		do {
163 			int slots = INREG( di->regs, RADEON_RBBM_STATUS ) & RADEON_RBBM_FIFOCNT_MASK;
164 
165 			if ( slots >= entries )
166 				return;
167 
168 			snooze( 1 );
169 		} while( system_time() - start_time < 1000000 );
170 
171 		LOG( di->si->log, _Radeon_WaitForFifo );
172 
173 		Radeon_ResetEngine( di );
174 	}
175 }
176 
177 // flush pixel cache of graphics card
178 void Radeon_FlushPixelCache( device_info *di )
179 {
180 	bigtime_t start_time;
181 
182 	OUTREGP( di->regs, RADEON_RB2D_DSTCACHE_CTLSTAT, RADEON_RB2D_DC_FLUSH_ALL,
183 		~RADEON_RB2D_DC_FLUSH_ALL );
184 
185 	start_time = system_time();
186 
187 	do {
188 		if( (INREG( di->regs, RADEON_RB2D_DSTCACHE_CTLSTAT )
189 			 & RADEON_RB2D_DC_BUSY) == 0 )
190 			return;
191 
192 		snooze( 1 );
193 	} while( system_time() - start_time < 1000000 );
194 
195 	LOG( di->si->log, _Radeon_FlushPixelCache );
196 
197 	SHOW_ERROR0( 0, "pixel cache didn't become empty" );
198 }
199 
200 // reset graphics card's engine
201 // lock must be hold
void Radeon_ResetEngine( device_info *di )
{
	vuint8 *regs = di->regs;
	shared_info *si = di->si;
	uint32 clock_cntl_index, mclk_cntl, rbbm_soft_reset, host_path_cntl;
	uint32 cur_read_ptr;

	SHOW_FLOW0( 3, "" );

	// flush the 2D destination cache before yanking the engine
	Radeon_FlushPixelCache( di );

	// save clock state so it can be restored after the reset
	clock_cntl_index = INREG( regs, RADEON_CLOCK_CNTL_INDEX );
	RADEONPllErrataAfterIndex( regs, di->asic );	// drm has no errata here!
	mclk_cntl = Radeon_INPLL( regs, di->asic, RADEON_MCLK_CNTL );

	// enable clock of units to be reset
	Radeon_OUTPLL( regs, di->asic, RADEON_MCLK_CNTL, mclk_cntl |
      RADEON_FORCEON_MCLKA |
      RADEON_FORCEON_MCLKB |
      RADEON_FORCEON_YCLKA |
      RADEON_FORCEON_YCLKB |
      RADEON_FORCEON_MC |
      RADEON_FORCEON_AIC );

	// do the reset: assert soft-reset of all units, then deassert;
	// the INREG read-backs flush the posted writes to the chip
    host_path_cntl = INREG( regs, RADEON_HOST_PATH_CNTL );
	rbbm_soft_reset = INREG( regs, RADEON_RBBM_SOFT_RESET );

	OUTREG( regs, RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
			RADEON_SOFT_RESET_CP |
			RADEON_SOFT_RESET_HI |
			RADEON_SOFT_RESET_SE |
			RADEON_SOFT_RESET_RE |
			RADEON_SOFT_RESET_PP |
			RADEON_SOFT_RESET_E2 |
			RADEON_SOFT_RESET_RB ) );
	INREG( regs, RADEON_RBBM_SOFT_RESET);
	OUTREG( regs, RADEON_RBBM_SOFT_RESET, rbbm_soft_reset &
		~( RADEON_SOFT_RESET_CP |
		   RADEON_SOFT_RESET_HI |
		   RADEON_SOFT_RESET_SE |
		   RADEON_SOFT_RESET_RE |
		   RADEON_SOFT_RESET_PP |
		   RADEON_SOFT_RESET_E2 |
		   RADEON_SOFT_RESET_RB ) );
	INREG( regs, RADEON_RBBM_SOFT_RESET);

	// pulse the host data path soft-reset as well
    OUTREG( regs, RADEON_HOST_PATH_CNTL, host_path_cntl | RADEON_HDP_SOFT_RESET );
    INREG( regs, RADEON_HOST_PATH_CNTL );
    OUTREG( regs, RADEON_HOST_PATH_CNTL, host_path_cntl );

	// restore the saved clock/reset state
	Radeon_OUTPLL( regs, di->asic, RADEON_MCLK_CNTL, mclk_cntl );
   	OUTREG( regs, RADEON_CLOCK_CNTL_INDEX, clock_cntl_index );
   	//RADEONPllErrataAfterIndex( regs, di->asic ); // drm doesn't do this here!
   	OUTREG( regs, RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);

	if ( di->acc_dma )
	{
		// reset ring buffer: make the write pointer catch up with the
		// read pointer so no stale commands get executed
		cur_read_ptr = INREG( regs, RADEON_CP_RB_RPTR );
		OUTREG( regs, RADEON_CP_RB_WPTR, cur_read_ptr );

		//if( si->cp.ring.head ) {
		// during init, there are no feedback data
		if( si->cp.feedback.mem_handle != 0 ) {
			// keep the CPU-visible head copy and software tail in sync
			// with the hardware read pointer
			*(uint32 *)MEM2CPU( si->cp.feedback.mem_type, si->cp.feedback.head_mem_offset) =
				cur_read_ptr;
			//	*si->cp.ring.head = cur_read_ptr;
			si->cp.ring.tail = cur_read_ptr;
		}

		// mark all buffers as being finished
		Radeon_DiscardAllIndirectBuffers( di );
	}

	// count this reset (engine.count is mirrored via scratch register 0,
	// see file header)
	++si->engine.count;
	return;
}
280 
281 
// upload Micro-Code of CP
// (writes the 256-entry microcode table for the CP micro engine;
//  must happen before the CP executes any commands)
static void loadMicroEngineRAMData( device_info *di )
{
	int i;
	const uint32 (*microcode)[2];

	SHOW_FLOW0( 3, "" );

	// choose the microcode image matching the ASIC family
	switch( di->asic ) {
	case rt_r300:
	case rt_rv350:
	case rt_r350:
	case rt_rv380:
	case rt_r420:
		microcode = r300_cp_microcode;
		break;
	case rt_r200:
		microcode = r200_cp_microcode;
		break;
	case rt_rs100:
	default:
		microcode = radeon_cp_microcode;
	}

	Radeon_WaitForIdle( di, false, false );

	// set start address once, then store each entry as a high/low dword
	// pair (the RAM address presumably auto-increments per pair -
	// hardware behaviour not visible here)
	OUTREG( di->regs, RADEON_CP_ME_RAM_ADDR, 0 );

	for ( i = 0 ; i < 256 ; i++ ) {
		OUTREG( di->regs, RADEON_CP_ME_RAM_DATAH, microcode[i][1] );
		OUTREG( di->regs, RADEON_CP_ME_RAM_DATAL, microcode[i][0] );
	}
}
315 
// allocate and program the CP ring buffer
// aring_size - size of ring in dwords (must be a power of two:
//              tail_mask = aring_size - 1, and the size register takes
//              log2 of qwords)
static status_t initRingBuffer( device_info *di, int aring_size )
{
	status_t res;
	shared_info *si = di->si;
	CP_info *cp = &si->cp;
	vuint8 *regs = di->regs;
	int32 offset;
	memory_type_e memory_type;

	memset( &cp->ring, 0, sizeof( cp->ring ));

	// ring and indirect buffers can be either in AGP or PCI GART
	// (it seems that they cannot be in graphics memory, at least
	//  I had serious coherency problems when I tried that)
	memory_type = mt_nonlocal;

	// ALLOC_MEM sets res and replaces memory_type with the concrete
	// non-local type (AGP or PCI)
	ALLOC_MEM( aring_size * 4, memory_type, true,
		&cp->ring.mem_handle, &offset );

	if( res != B_OK ) {
		SHOW_ERROR0( 0, "Cannot allocate ring buffer" );
		return res;
	}

	// setup CP buffer
	cp->ring.mem_type = memory_type;
	cp->ring.mem_offset = offset;
	cp->ring.vm_base = MEM2GC( memory_type, offset );
	cp->ring.size = aring_size;
	cp->ring.tail_mask = aring_size - 1;
	OUTREG( regs, RADEON_CP_RB_BASE, cp->ring.vm_base );
	SHOW_INFO( 3, "CP buffer address=%lx", cp->ring.vm_base );

	// set ring buffer size
	// (it's log2 of qwords)
	OUTREG( regs, RADEON_CP_RB_CNTL, log2( cp->ring.size / 2 ));
	SHOW_INFO( 3, "CP buffer size mask=%d", log2( cp->ring.size / 2 ) );

	// set write pointer delay to zero;
	// we assume that memory synchronization is done correctly by MoBo
	// and Radeon_SendCP contains a hack that hopefully fixes such problems
	OUTREG( regs, RADEON_CP_RB_WPTR_DELAY, 0 );

	// clear the ring so stale data can never be interpreted as commands
	memset( MEM2CPU( cp->ring.mem_type, cp->ring.mem_offset), 0, cp->ring.size * 4 );

	// set CP buffer pointers
	OUTREG( regs, RADEON_CP_RB_RPTR, 0 );
	OUTREG( regs, RADEON_CP_RB_WPTR, 0 );
	//*cp->ring.head = 0;
	cp->ring.tail = 0;

	return B_OK;
}
370 
371 static void uninitRingBuffer( device_info *di )
372 {
373 	vuint8 *regs = di->regs;
374 
375 	// abort any activity
376 	Radeon_ResetEngine( di );
377 
378 	// disable CP BM
379 	OUTREG( regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );
380 	// read-back for flushing
381 	INREG( regs, RADEON_CP_CSQ_CNTL );
382 
383 	FREE_MEM( mt_nonlocal, di->si->cp.ring.mem_handle );
384 }
385 
// allocate and program the CP feedback area
// (one block holding the ring read pointer at offset 0 and the scratch
//  registers at RADEON_SCRATCH_REG_OFFSET; see file header)
static status_t initCPFeedback( device_info *di )
{
	CP_info *cp = &di->si->cp;
	vuint8 *regs = di->regs;
	int32 offset;
	memory_type_e memory_type;
	status_t res;

	// status information should be in PCI memory, so CPU can
	// poll it without locking the bus (PCI memory is the only
	// cachable memory available)
	memory_type = mt_PCI;

	// ALLOC_MEM sets res
	ALLOC_MEM( RADEON_SCRATCH_REG_OFFSET + 0x40, memory_type, true,
		&cp->feedback.mem_handle, &offset );

	if( res != B_OK ) {
		SHOW_ERROR0( 0, "Cannot allocate buffers for status information" );
		return res;
	}

	// setup CP read pointer buffer
	cp->feedback.mem_type = memory_type;
	cp->feedback.head_mem_offset = offset;
	cp->feedback.head_vm_address = MEM2GC( memory_type, cp->feedback.head_mem_offset );
	OUTREG( regs, RADEON_CP_RB_RPTR_ADDR, cp->feedback.head_vm_address );
	SHOW_INFO( 3, "CP read pointer buffer==%lx", cp->feedback.head_vm_address );

	// setup scratch register buffer
	cp->feedback.scratch_mem_offset = offset + RADEON_SCRATCH_REG_OFFSET;
	cp->feedback.scratch_vm_start = MEM2GC( memory_type, cp->feedback.scratch_mem_offset );
	OUTREG( regs, RADEON_SCRATCH_ADDR, cp->feedback.scratch_vm_start );
	// 0x3f: enable write-back of all six scratch registers (see file header)
	OUTREG( regs, RADEON_SCRATCH_UMSK, 0x3f );

	// start with a clean feedback state
	*(uint32 *)MEM2CPU( cp->feedback.mem_type, cp->feedback.head_mem_offset) = 0;
	memset( MEM2CPU( cp->feedback.mem_type, cp->feedback.scratch_mem_offset), 0, 0x40 );
	//*cp->ring.head = 0;

	return B_OK;
}
426 
427 static void uninitCPFeedback( device_info *di )
428 {
429 	vuint8 *regs = di->regs;
430 
431 	// don't allow any scratch buffer update
432 	OUTREG( regs, RADEON_SCRATCH_UMSK, 0x0 );
433 
434 	FREE_MEM( mt_PCI, di->si->cp.feedback.mem_handle );
435 }
436 
437 static status_t initIndirectBuffers( device_info *di )
438 {
439 	CP_info *cp = &di->si->cp;
440 	int32 offset;
441 	memory_type_e memory_type;
442 	int i;
443 	status_t res;
444 
445 	memory_type = mt_nonlocal;
446 
447 	ALLOC_MEM( NUM_INDIRECT_BUFFERS * INDIRECT_BUFFER_SIZE * 4, memory_type,
448 		true, &cp->buffers.mem_handle, &offset );
449 
450 	if( res != B_OK ) {
451 		SHOW_ERROR0( 0, "Cannot allocate indirect buffers" );
452 		return B_ERROR;
453 	}
454 
455 	cp->buffers.mem_type = memory_type;
456 	cp->buffers.mem_offset = offset;
457 	cp->buffers.vm_start = MEM2GC( memory_type, cp->buffers.mem_offset );
458 
459 	for( i = 0; i < NUM_INDIRECT_BUFFERS - 1; ++i ) {
460 		cp->buffers.buffers[i].next = i + 1;
461 	}
462 
463 	cp->buffers.buffers[i].next = -1;
464 
465 	cp->buffers.free_list = 0;
466 	cp->buffers.oldest = -1;
467 	cp->buffers.newest = -1;
468 	cp->buffers.active_state = -1;
469 	cp->buffers.cur_tag = 0;
470 
471 	memset( MEM2CPU( cp->buffers.mem_type, cp->buffers.mem_offset), 0,
472 		NUM_INDIRECT_BUFFERS * INDIRECT_BUFFER_SIZE * 4 );
473 
474 	return B_OK;
475 }
476 
// free the indirect buffer pool
// (all buffers were allocated as one block, so a single free suffices)
static void uninitIndirectBuffers( device_info *di )
{
	FREE_MEM( mt_nonlocal, di->si->cp.buffers.mem_handle );
}
481 
482 // initialize CP so it's ready for BM
// initialize the Command Processor: upload microcode, soft-reset the
// engine, and (when DMA acceleration is enabled) set up ring buffer,
// feedback area and indirect buffers, then enable bus mastering.
// returns B_OK on success or the first failing sub-step's error code;
// on failure, everything set up so far is torn down again (goto chain)
status_t Radeon_InitCP( device_info *di )
{
	thread_id thid;
    thread_info thinfo;
	status_t res;

	SHOW_FLOW0( 3, "" );

	// this is _really_ necessary so functions like ResetEngine() know
	// that the CP is not set up yet
	memset( &di->si->cp, 0, sizeof( di->si->cp ));

	if( (res = INIT_BEN( di->si->cp.lock, "Radeon CP" )) < 0 )
		return res;

	// HACK: change owner of benaphore semaphore to team of calling thread;
	// reason: user code cannot acquire kernel semaphores, but the accelerant
	// is in user space; interestingly, it's enough to change the semaphore's
	// owner to _any_ non-system team (that's the only security check done by
	// the kernel)
	thid = find_thread( NULL );
    get_thread_info( thid, &thinfo );
    set_sem_owner( di->si->cp.lock.sem, thinfo.team );

	// init raw CP
	if ( di->acc_dma ) loadMicroEngineRAMData( di );

	// do soft-reset
	Radeon_ResetEngine( di );

	// after warm-reset, the CP may still be active and thus react to
	// register writes during initialization unpredictably, so we better
	// stop it first
	OUTREG( di->regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );
	INREG( di->regs, RADEON_CP_CSQ_CNTL );

	// reset CP to make disabling active
	Radeon_ResetEngine( di );

	if ( di->acc_dma )
	{
		res = initRingBuffer( di, CP_RING_SIZE );
		if( res < 0 )
			goto err4;

		res = initCPFeedback( di );
		if( res < 0 )
			goto err3;

		res = initIndirectBuffers( di );
		if( res < 0 )
			goto err2;

		// tell CP to use BM
		Radeon_WaitForIdle( di, false, false );

		// enable direct and indirect CP bus mastering
		OUTREG( di->regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM );

		// allow bus mastering in general
		OUTREGP( di->regs, RADEON_BUS_CNTL, 0, ~RADEON_BUS_MASTER_DIS );
	}


	// don't allow mixing of 2D/3D/scratch/wait_until commands
	// (in fact, this doesn't seem to make any difference as we do a
	// manual sync in all these cases anyway)
	OUTREG( di->regs, RADEON_ISYNC_CNTL,
		RADEON_ISYNC_ANY2D_IDLE3D |
		RADEON_ISYNC_ANY3D_IDLE2D |
		RADEON_ISYNC_WAIT_IDLEGUI |
		RADEON_ISYNC_CPSCRATCH_IDLEGUI );

	SHOW_FLOW( 3, "bus_cntl=%lx", INREG( di->regs, RADEON_BUS_CNTL ));

	SHOW_FLOW0( 3, "Done" );

	return B_OK;

	// error cleanup: labels fall through so later-failing steps also
	// undo the earlier ones
//err:
//	uninitIndirectBuffers( ai );
err2:
	uninitCPFeedback( di );
err3:
	uninitRingBuffer( di );
err4:
	DELETE_BEN( di->si->cp.lock );
	return res;
}
572 
573 
574 // shutdown CP, freeing any memory
575 void Radeon_UninitCP( device_info *di )
576 {
577 	vuint8 *regs = di->regs;
578 
579 	// abort any pending commands
580 	Radeon_ResetEngine( di );
581 
582 	// disable CP BM
583 	OUTREG( regs, RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS );
584 	// read-back for flushing
585 	INREG( regs, RADEON_CP_CSQ_CNTL );
586 
587 	if ( di->acc_dma )
588 	{
589 		uninitRingBuffer( di );
590 		uninitCPFeedback( di );
591 		uninitIndirectBuffers( di );
592 	}
593 
594 	DELETE_BEN( di->si->cp.lock );
595 }
596 
597 
598 // mark all indirect buffers as being free;
599 // this should only be called after a reset;
600 // lock must be hold
601 void Radeon_DiscardAllIndirectBuffers( device_info *di )
602 {
603 	CP_info *cp = &di->si->cp;
604 
605 	// during init, there is no indirect buffer
606 	if( cp->buffers.mem_handle == 0 )
607 		return;
608 
609 	// mark all sent indirect buffers as free
610 	while( cp->buffers.oldest != -1 ) {
611 		indirect_buffer *oldest_buffer =
612 			&cp->buffers.buffers[cp->buffers.oldest];
613 		int tmp_oldest_buffer;
614 
615 		SHOW_FLOW( 0, "%d", cp->buffers.oldest );
616 
617 		// remove buffer from "used" list
618 		tmp_oldest_buffer = oldest_buffer->next;
619 
620 		if( tmp_oldest_buffer == -1 )
621 			cp->buffers.newest = -1;
622 
623 		// put it on free list
624 		oldest_buffer->next = cp->buffers.free_list;
625 		cp->buffers.free_list = cp->buffers.oldest;
626 
627 		cp->buffers.oldest = tmp_oldest_buffer;
628 	}
629 }
630 
631 // lets hide this in here, as it's got lots of lovely register headers already...
632 // does it go here, or in the accelerant anyway?
633 // for now i'm assuming you turn on dynamic clocks, and they take care of themselves onwards...
634 // so doing it at driver init seems sensible after a valid detection of course...
635 void Radeon_SetDynamicClock( device_info *di, int mode)
636 {
637     vuint8 *regs = di->regs;
638     radeon_type asic = di->asic;
639     uint32 tmp;
640 
641     switch(mode) {
642 	case 0: /* Turn everything OFF (ForceON to everything)*/
643 		if ( di->num_crtc != 2 ) {
644 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
645 			tmp |= (RADEON_SCLK_FORCE_CP   | RADEON_SCLK_FORCE_HDP |
646 				RADEON_SCLK_FORCE_DISP1 | RADEON_SCLK_FORCE_TOP |
647 				RADEON_SCLK_FORCE_E2   | RADEON_SCLK_FORCE_SE  |
648 				RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_VIP |
649 				RADEON_SCLK_FORCE_RE   | RADEON_SCLK_FORCE_PB  |
650 				RADEON_SCLK_FORCE_TAM  | RADEON_SCLK_FORCE_TDM |
651 				RADEON_SCLK_FORCE_RB);
652 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
653 		} else if (asic == rt_rv350) {
654 			/* for RV350/M10, no delays are required. */
655 			tmp = Radeon_INPLL(regs, asic, R300_SCLK_CNTL2);
656 			tmp |= (R300_SCLK_FORCE_TCL |
657 				R300_SCLK_FORCE_GA  |
658 				R300_SCLK_FORCE_CBA);
659 			Radeon_OUTPLL(regs, asic, R300_SCLK_CNTL2, tmp);
660 
661 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
662 			tmp |= (RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP      |
663 				RADEON_SCLK_FORCE_HDP   | RADEON_SCLK_FORCE_DISP1   |
664 				RADEON_SCLK_FORCE_TOP   | RADEON_SCLK_FORCE_E2      |
665 				R300_SCLK_FORCE_VAP     | RADEON_SCLK_FORCE_IDCT    |
666 				RADEON_SCLK_FORCE_VIP   | R300_SCLK_FORCE_SR        |
667 				R300_SCLK_FORCE_PX      | R300_SCLK_FORCE_TX        |
668 				R300_SCLK_FORCE_US      | RADEON_SCLK_FORCE_TV_SCLK |
669 				R300_SCLK_FORCE_SU      | RADEON_SCLK_FORCE_OV0);
670 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
671 
672 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_MORE_CNTL);
673 			tmp |= RADEON_SCLK_MORE_FORCEON;
674 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_MORE_CNTL, tmp);
675 
676 			tmp = Radeon_INPLL(regs, asic, RADEON_MCLK_CNTL);
677 			tmp |= (RADEON_FORCEON_MCLKA |
678 				RADEON_FORCEON_MCLKB |
679 				RADEON_FORCEON_YCLKA |
680 				RADEON_FORCEON_YCLKB |
681 				RADEON_FORCEON_MC);
682 			Radeon_OUTPLL(regs, asic, RADEON_MCLK_CNTL, tmp);
683 
684 			tmp = Radeon_INPLL(regs, asic, RADEON_VCLK_ECP_CNTL);
685 			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb  |
686 				RADEON_PIXCLK_DAC_ALWAYS_ONb |
687 			R300_DISP_DAC_PIXCLK_DAC_BLANK_OFF);
688 			Radeon_OUTPLL(regs, asic, RADEON_VCLK_ECP_CNTL, tmp);
689 
690 			tmp = Radeon_INPLL(regs, asic, RADEON_PIXCLKS_CNTL);
691 			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb         |
692 				RADEON_PIX2CLK_DAC_ALWAYS_ONb     |
693 				RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
694 				R300_DVOCLK_ALWAYS_ONb            |
695 				RADEON_PIXCLK_BLEND_ALWAYS_ONb    |
696 				RADEON_PIXCLK_GV_ALWAYS_ONb       |
697 				R300_PIXCLK_DVO_ALWAYS_ONb        |
698 				RADEON_PIXCLK_LVDS_ALWAYS_ONb     |
699 				RADEON_PIXCLK_TMDS_ALWAYS_ONb     |
700 				R300_PIXCLK_TRANS_ALWAYS_ONb      |
701 				R300_PIXCLK_TVO_ALWAYS_ONb        |
702 				R300_P2G2CLK_ALWAYS_ONb            |
703 				R300_P2G2CLK_ALWAYS_ONb           |
704 				R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF);
705 			Radeon_OUTPLL(regs, asic, RADEON_PIXCLKS_CNTL, tmp);
706 		}  else {
707 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
708 			tmp |= (RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_E2);
709 			tmp |= RADEON_SCLK_FORCE_SE;
710 
711 			if ( di->num_crtc != 2 ) {
712 				tmp |= ( RADEON_SCLK_FORCE_RB    |
713 				RADEON_SCLK_FORCE_TDM   |
714 				RADEON_SCLK_FORCE_TAM   |
715 				RADEON_SCLK_FORCE_PB    |
716 				RADEON_SCLK_FORCE_RE    |
717 				RADEON_SCLK_FORCE_VIP   |
718 				RADEON_SCLK_FORCE_IDCT  |
719 				RADEON_SCLK_FORCE_TOP   |
720 				RADEON_SCLK_FORCE_DISP1 |
721 				RADEON_SCLK_FORCE_DISP2 |
722 				RADEON_SCLK_FORCE_HDP    );
723 			} else if ((asic == rt_r300) || (asic == rt_r350)) {
724 				tmp |= ( RADEON_SCLK_FORCE_HDP   |
725 					RADEON_SCLK_FORCE_DISP1 |
726 					RADEON_SCLK_FORCE_DISP2 |
727 					RADEON_SCLK_FORCE_TOP   |
728 					RADEON_SCLK_FORCE_IDCT  |
729 					RADEON_SCLK_FORCE_VIP);
730 			}
731 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
732 
733 			snooze(16000);
734 
735 			if ((asic == rt_r300) || (asic == rt_r350)) {
736 				tmp = Radeon_INPLL(regs, asic, R300_SCLK_CNTL2);
737 				tmp |= ( R300_SCLK_FORCE_TCL |
738 					R300_SCLK_FORCE_GA  |
739 					R300_SCLK_FORCE_CBA);
740 				Radeon_OUTPLL(regs, asic, R300_SCLK_CNTL2, tmp);
741 				snooze(16000);
742 			}
743 
744 			if (di->is_igp) {
745 				tmp = Radeon_INPLL(regs, asic, RADEON_MCLK_CNTL);
746 				tmp &= ~(RADEON_FORCEON_MCLKA |
747 					RADEON_FORCEON_YCLKA);
748 				Radeon_OUTPLL(regs, asic, RADEON_MCLK_CNTL, tmp);
749 				snooze(16000);
750 			}
751 
752 			if ((asic == rt_rv200) ||
753 				(asic == rt_rv250) ||
754 				(asic == rt_rv280)) {
755 				tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_MORE_CNTL);
756 				tmp |= RADEON_SCLK_MORE_FORCEON;
757 				Radeon_OUTPLL(regs, asic, RADEON_SCLK_MORE_CNTL, tmp);
758 				snooze(16000);
759 			}
760 
761 			tmp = Radeon_INPLL(regs, asic, RADEON_PIXCLKS_CNTL);
762 			tmp &= ~(RADEON_PIX2CLK_ALWAYS_ONb         |
763 				RADEON_PIX2CLK_DAC_ALWAYS_ONb     |
764 				RADEON_PIXCLK_BLEND_ALWAYS_ONb    |
765 				RADEON_PIXCLK_GV_ALWAYS_ONb       |
766 				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
767 				RADEON_PIXCLK_LVDS_ALWAYS_ONb     |
768 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
769 
770 			Radeon_OUTPLL(regs, asic, RADEON_PIXCLKS_CNTL, tmp);
771 			snooze(16000);
772 
773 			tmp = Radeon_INPLL(regs, asic, RADEON_VCLK_ECP_CNTL);
774 			tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb  |
775 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
776 			Radeon_OUTPLL(regs, asic, RADEON_VCLK_ECP_CNTL, tmp);
777 		}
778 		SHOW_FLOW0( 3, "Dynamic Clock Scaling Disabled" );
779 		break;
780 	case 1:
781 		if ( di->num_crtc != 2 ) {
782 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
783 			if ((INREG( regs, RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) > RADEON_CFG_ATI_REV_A13) {
784 				tmp &= ~(RADEON_SCLK_FORCE_CP | RADEON_SCLK_FORCE_RB);
785 			}
786 			tmp &= ~(RADEON_SCLK_FORCE_HDP  | RADEON_SCLK_FORCE_DISP1 |
787 				RADEON_SCLK_FORCE_TOP  | RADEON_SCLK_FORCE_SE   |
788 				RADEON_SCLK_FORCE_IDCT | RADEON_SCLK_FORCE_RE   |
789 				RADEON_SCLK_FORCE_PB   | RADEON_SCLK_FORCE_TAM  |
790 				RADEON_SCLK_FORCE_TDM);
791 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
792 		} else if ((asic == rt_r300)
793 				|| (asic == rt_r350)
794 				|| (asic == rt_rv350)) {
795 			if (asic == rt_rv350) {
796 				tmp = Radeon_INPLL(regs, asic, R300_SCLK_CNTL2);
797 				tmp &= ~(R300_SCLK_FORCE_TCL |
798 					R300_SCLK_FORCE_GA  |
799 					R300_SCLK_FORCE_CBA);
800 				tmp |=  (R300_SCLK_TCL_MAX_DYN_STOP_LAT |
801 					R300_SCLK_GA_MAX_DYN_STOP_LAT  |
802 					R300_SCLK_CBA_MAX_DYN_STOP_LAT);
803 				Radeon_OUTPLL(regs, asic, R300_SCLK_CNTL2, tmp);
804 
805 				tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
806 				tmp &= ~(RADEON_SCLK_FORCE_DISP2 | RADEON_SCLK_FORCE_CP      |
807 					RADEON_SCLK_FORCE_HDP   | RADEON_SCLK_FORCE_DISP1   |
808 					RADEON_SCLK_FORCE_TOP   | RADEON_SCLK_FORCE_E2      |
809 					R300_SCLK_FORCE_VAP     | RADEON_SCLK_FORCE_IDCT    |
810 					RADEON_SCLK_FORCE_VIP   | R300_SCLK_FORCE_SR        |
811 					R300_SCLK_FORCE_PX      | R300_SCLK_FORCE_TX        |
812 					R300_SCLK_FORCE_US      | RADEON_SCLK_FORCE_TV_SCLK |
813 					R300_SCLK_FORCE_SU      | RADEON_SCLK_FORCE_OV0);
814 					tmp |=  RADEON_DYN_STOP_LAT_MASK;
815 				Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
816 
817 				tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_MORE_CNTL);
818 				tmp &= ~RADEON_SCLK_MORE_FORCEON;
819 				tmp |=  RADEON_SCLK_MORE_MAX_DYN_STOP_LAT;
820 				Radeon_OUTPLL(regs, asic, RADEON_SCLK_MORE_CNTL, tmp);
821 
822 				tmp = Radeon_INPLL(regs, asic, RADEON_VCLK_ECP_CNTL);
823 				tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
824 					RADEON_PIXCLK_DAC_ALWAYS_ONb);
825 				Radeon_OUTPLL(regs, asic, RADEON_VCLK_ECP_CNTL, tmp);
826 
827 				tmp = Radeon_INPLL(regs, asic, RADEON_PIXCLKS_CNTL);
828 				tmp |= (RADEON_PIX2CLK_ALWAYS_ONb         |
829 				RADEON_PIX2CLK_DAC_ALWAYS_ONb     |
830 					RADEON_DISP_TVOUT_PIXCLK_TV_ALWAYS_ONb |
831 					R300_DVOCLK_ALWAYS_ONb            |
832 					RADEON_PIXCLK_BLEND_ALWAYS_ONb    |
833 					RADEON_PIXCLK_GV_ALWAYS_ONb       |
834 					R300_PIXCLK_DVO_ALWAYS_ONb        |
835 					RADEON_PIXCLK_LVDS_ALWAYS_ONb     |
836 					RADEON_PIXCLK_TMDS_ALWAYS_ONb     |
837 					R300_PIXCLK_TRANS_ALWAYS_ONb      |
838 					R300_PIXCLK_TVO_ALWAYS_ONb        |
839 					R300_P2G2CLK_ALWAYS_ONb           |
840 					R300_P2G2CLK_ALWAYS_ONb);
841 				Radeon_OUTPLL(regs, asic, RADEON_PIXCLKS_CNTL, tmp);
842 
843 				tmp = Radeon_INPLL(regs, asic, RADEON_MCLK_MISC);
844 				tmp |= (RADEON_MC_MCLK_DYN_ENABLE |
845 					RADEON_IO_MCLK_DYN_ENABLE);
846 				Radeon_OUTPLL(regs, asic, RADEON_MCLK_MISC, tmp);
847 
848 				tmp = Radeon_INPLL(regs, asic, RADEON_MCLK_CNTL);
849 				tmp |= (RADEON_FORCEON_MCLKA |
850 					RADEON_FORCEON_MCLKB);
851 
852 				tmp &= ~(RADEON_FORCEON_YCLKA  |
853 					RADEON_FORCEON_YCLKB  |
854 					RADEON_FORCEON_MC);
855 
856 				/* Some releases of vbios have set DISABLE_MC_MCLKA
857 				and DISABLE_MC_MCLKB bits in the vbios table.  Setting these
858 				bits will cause H/W hang when reading video memory with dynamic clocking
859 				enabled. */
860 				if ((tmp & R300_DISABLE_MC_MCLKA) &&
861 				(tmp & R300_DISABLE_MC_MCLKB)) {
862 					/* If both bits are set, then check the active channels */
863 					tmp = Radeon_INPLL(regs, asic, RADEON_MCLK_CNTL);
864 					if (di->ram.width == 64) {
865 						if (INREG( regs, RADEON_MEM_CNTL) & R300_MEM_USE_CD_CH_ONLY)
866 						tmp &= ~R300_DISABLE_MC_MCLKB;
867 						else
868 						tmp &= ~R300_DISABLE_MC_MCLKA;
869 					} else {
870 						tmp &= ~(R300_DISABLE_MC_MCLKA |
871 						R300_DISABLE_MC_MCLKB);
872 					}
873 				}
874 
875 				Radeon_OUTPLL(regs, asic, RADEON_MCLK_CNTL, tmp);
876 			} else {
877 				tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
878 				tmp &= ~(R300_SCLK_FORCE_VAP);
879 				tmp |= RADEON_SCLK_FORCE_CP;
880 				Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
881 				snooze(15000);
882 
883 				tmp = Radeon_INPLL(regs, asic, R300_SCLK_CNTL2);
884 				tmp &= ~(R300_SCLK_FORCE_TCL |
885 				R300_SCLK_FORCE_GA  |
886 				R300_SCLK_FORCE_CBA);
887 				Radeon_OUTPLL(regs, asic, R300_SCLK_CNTL2, tmp);
888 			}
889 		} else {
890 			tmp = Radeon_INPLL(regs, asic, RADEON_CLK_PWRMGT_CNTL);
891 
892 			tmp &= ~(RADEON_ACTIVE_HILO_LAT_MASK     |
893 				RADEON_DISP_DYN_STOP_LAT_MASK   |
894 				RADEON_DYN_STOP_MODE_MASK);
895 
896 			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
897 			(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
898 			Radeon_OUTPLL(regs, asic, RADEON_CLK_PWRMGT_CNTL, tmp);
899 			snooze(15000);
900 
901 			tmp = Radeon_INPLL(regs, asic, RADEON_CLK_PIN_CNTL);
902 			tmp |= RADEON_SCLK_DYN_START_CNTL;
903 			Radeon_OUTPLL(regs, asic, RADEON_CLK_PIN_CNTL, tmp);
904 			snooze(15000);
905 
906 			/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
907 			to lockup randomly, leave them as set by BIOS.
908 			*/
909 			tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_CNTL);
910 			/*tmp &= RADEON_SCLK_SRC_SEL_MASK;*/
911 			tmp &= ~RADEON_SCLK_FORCEON_MASK;
912 
913 			/*RAGE_6::A11 A12 A12N1 A13, RV250::A11 A12, R300*/
914 			if (((asic == rt_rv250) &&
915 				((INREG( regs, RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) <
916 				  RADEON_CFG_ATI_REV_A13)) ||
917 				((asic == rt_rv100) &&
918 				((INREG( regs, RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) <=
919 				  RADEON_CFG_ATI_REV_A13)))
920 			{
921 				tmp |= RADEON_SCLK_FORCE_CP;
922 				tmp |= RADEON_SCLK_FORCE_VIP;
923 			}
924 
925 			Radeon_OUTPLL(regs, asic, RADEON_SCLK_CNTL, tmp);
926 
927 			if ((asic == rt_rv200) ||
928 				(asic == rt_rv250) ||
929 				(asic == rt_rv280)) {
930 				tmp = Radeon_INPLL(regs, asic, RADEON_SCLK_MORE_CNTL);
931 				tmp &= ~RADEON_SCLK_MORE_FORCEON;
932 
933 				/* RV200::A11 A12 RV250::A11 A12 */
934 				if (((asic == rt_rv200) ||
935 					 (asic == rt_rv250)) &&
936 					((INREG( regs, RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) <
937 					  RADEON_CFG_ATI_REV_A13))
938 				{
939 					tmp |= RADEON_SCLK_MORE_FORCEON;
940 				}
941 				Radeon_OUTPLL(regs, asic, RADEON_SCLK_MORE_CNTL, tmp);
942 				snooze(15000);
943 			}
944 
945 			/* RV200::A11 A12, RV250::A11 A12 */
946 			if (((asic == rt_rv200) ||
947 				 (asic == rt_rv250)) &&
948 				((INREG( regs, RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) <
949 				  RADEON_CFG_ATI_REV_A13))
950 			{
951 				tmp = Radeon_INPLL(regs, asic, RADEON_PLL_PWRMGT_CNTL);
952 				tmp |= RADEON_TCL_BYPASS_DISABLE;
953 				Radeon_OUTPLL(regs, asic, RADEON_PLL_PWRMGT_CNTL, tmp);
954 			}
955 			snooze(15000);
956 
957 			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK)*/
958 			tmp = Radeon_INPLL(regs, asic, RADEON_PIXCLKS_CNTL);
959 			tmp |=  (RADEON_PIX2CLK_ALWAYS_ONb         |
960 				RADEON_PIX2CLK_DAC_ALWAYS_ONb     |
961 				RADEON_PIXCLK_BLEND_ALWAYS_ONb    |
962 				RADEON_PIXCLK_GV_ALWAYS_ONb       |
963 				RADEON_PIXCLK_DIG_TMDS_ALWAYS_ONb |
964 				RADEON_PIXCLK_LVDS_ALWAYS_ONb     |
965 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
966 
967 			Radeon_OUTPLL(regs, asic, RADEON_PIXCLKS_CNTL, tmp);
968 			snooze(15000);
969 
970 			tmp = Radeon_INPLL(regs, asic, RADEON_VCLK_ECP_CNTL);
971 			tmp |= (RADEON_PIXCLK_ALWAYS_ONb  |
972 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
973 
974 			Radeon_OUTPLL(regs, asic, RADEON_VCLK_ECP_CNTL, tmp);
975 			snooze(15000);
976 		}
977 		SHOW_FLOW0( 3, "Dynamic Clock Scaling Enabled" );
978 		break;
979 	default:
980 		break;
981 	}
982 }
983